diff --git a/.gitmodules b/.gitmodules
index 3f14953ec8f..e69de29bb2d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,16 +0,0 @@
-[submodule "lib/ansible/modules/core"]
- path = lib/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core.git
- branch = devel
-[submodule "lib/ansible/modules/extras"]
- path = lib/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras.git
- branch = devel
-[submodule "v2/ansible/modules/core"]
- path = v2/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core.git
- branch = devel
-[submodule "v2/ansible/modules/extras"]
- path = v2/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras.git
- branch = devel
diff --git a/bin/ansible b/bin/ansible
index 7fec34ec81e..467dd505a2e 100755
--- a/bin/ansible
+++ b/bin/ansible
@@ -18,6 +18,8 @@
# along with Ansible. If not, see .
########################################################
+from __future__ import (absolute_import)
+__metaclass__ = type
__requires__ = ['ansible']
try:
@@ -33,175 +35,45 @@ except Exception:
import os
import sys
-from ansible.runner import Runner
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import callbacks
-from ansible import inventory
-########################################################
-
-class Cli(object):
- ''' code behind bin/ansible '''
-
- # ----------------------------------------------
-
- def __init__(self):
- self.stats = callbacks.AggregateStats()
- self.callbacks = callbacks.CliRunnerCallbacks()
- if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
- callbacks.load_callback_plugins()
-
- # ----------------------------------------------
-
- def parse(self):
- ''' create an options parser for bin/ansible '''
-
- parser = utils.base_parser(
- constants=C,
- runas_opts=True,
- subset_opts=True,
- async_opts=True,
- output_opts=True,
- connect_opts=True,
- check_opts=True,
- diff_opts=False,
- usage='%prog [options]'
- )
-
- parser.add_option('-a', '--args', dest='module_args',
- help="module arguments", default=C.DEFAULT_MODULE_ARGS)
- parser.add_option('-m', '--module-name', dest='module_name',
- help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
- default=C.DEFAULT_MODULE_NAME)
-
- options, args = parser.parse_args()
- self.callbacks.options = options
-
- if len(args) == 0 or len(args) > 1:
- parser.print_help()
- sys.exit(1)
-
- # privlege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- return (options, args)
-
- # ----------------------------------------------
-
- def run(self, options, args):
- ''' use Runner lib to do SSH things '''
-
- pattern = args[0]
-
- sshpass = becomepass = vault_pass = become_method = None
-
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- # become
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
- if options.subset:
- inventory_manager.subset(options.subset)
- hosts = inventory_manager.list_hosts(pattern)
-
- if len(hosts) == 0:
- callbacks.display("No hosts matched", stderr=True)
- sys.exit(0)
-
- if options.listhosts:
- for host in hosts:
- callbacks.display(' %s' % host)
- sys.exit(0)
-
- if options.module_name in ['command','shell'] and not options.module_args:
- callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
- sys.exit(1)
-
- if options.tree:
- utils.prepare_writeable_dir(options.tree)
-
- runner = Runner(
- module_name=options.module_name,
- module_path=options.module_path,
- module_args=options.module_args,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- inventory=inventory_manager,
- timeout=options.timeout,
- private_key_file=options.private_key_file,
- forks=options.forks,
- pattern=pattern,
- callbacks=self.callbacks,
- transport=options.connection,
- subset=options.subset,
- check=options.check,
- diff=options.check,
- vault_pass=vault_pass,
- become=options.become,
- become_method=options.become_method,
- become_pass=becomepass,
- become_user=options.become_user,
- extra_vars=extra_vars,
- )
-
- if options.seconds:
- callbacks.display("background launch...\n\n", color='cyan')
- results, poller = runner.run_async(options.seconds)
- results = self.poll_while_needed(poller, options)
- else:
- results = runner.run()
-
- return (runner, results)
-
- # ----------------------------------------------
-
- def poll_while_needed(self, poller, options):
- ''' summarize results from Runner '''
-
- # BACKGROUND POLL LOGIC when -B and -P are specified
- if options.seconds and options.poll_interval > 0:
- poller.wait(options.seconds, options.poll_interval)
-
- return poller.results
-
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.utils.display import Display
########################################################
if __name__ == '__main__':
- callbacks.display("", log_only=True)
- callbacks.display(" ".join(sys.argv), log_only=True)
- callbacks.display("", log_only=True)
- cli = Cli()
- (options, args) = cli.parse()
+ cli = None
+ display = Display()
+ me = os.path.basename(__file__)
+
try:
- (runner, results) = cli.run(options, args)
- for result in results['contacted'].values():
- if 'failed' in result or result.get('rc', 0) != 0:
- sys.exit(2)
- if results['dark']:
- sys.exit(3)
- except errors.AnsibleError, e:
- # Generic handler for ansible specific errors
- callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
- sys.exit(1)
+ if me == 'ansible-playbook':
+ from ansible.cli.playbook import PlaybookCLI as mycli
+ elif me == 'ansible':
+ from ansible.cli.adhoc import AdHocCLI as mycli
+ elif me == 'ansible-pull':
+ from ansible.cli.pull import PullCLI as mycli
+ elif me == 'ansible-doc':
+ from ansible.cli.doc import DocCLI as mycli
+ elif me == 'ansible-vault':
+ from ansible.cli.vault import VaultCLI as mycli
+ elif me == 'ansible-galaxy':
+ from ansible.cli.galaxy import GalaxyCLI as mycli
+ cli = mycli(sys.argv, display=display)
+ if cli:
+ cli.parse()
+ sys.exit(cli.run())
+ else:
+ raise AnsibleError("Program not implemented: %s" % me)
+
+ except AnsibleOptionsError as e:
+ cli.parser.print_help()
+ display.display(str(e), stderr=True, color='red')
+ sys.exit(1)
+ except AnsibleError as e:
+ display.display(str(e), stderr=True, color='red')
+ sys.exit(2)
+ except KeyboardInterrupt:
+ display.error("interrupted")
+ sys.exit(4)
diff --git a/bin/ansible-doc b/bin/ansible-doc
deleted file mode 100755
index dff7cecce79..00000000000
--- a/bin/ansible-doc
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Jan-Piet Mens
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-
-import os
-import sys
-import textwrap
-import re
-import optparse
-import datetime
-import subprocess
-import fcntl
-import termios
-import struct
-
-from ansible import utils
-from ansible.utils import module_docs
-import ansible.constants as C
-from ansible.utils import version
-import traceback
-
-MODULEDIR = C.DEFAULT_MODULE_PATH
-
-BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
-IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-PAGER = 'less'
-LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
- # -S (chop long lines) -X (disable termcap init and de-init)
-
-def pager_print(text):
- ''' just print text '''
- print text
-
-def pager_pipe(text, cmd):
- ''' pipe text through a pager '''
- if 'LESS' not in os.environ:
- os.environ['LESS'] = LESS_OPTS
- try:
- cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
- cmd.communicate(input=text)
- except IOError:
- pass
- except KeyboardInterrupt:
- pass
-
-def pager(text):
- ''' find reasonable way to display text '''
- # this is a much simpler form of what is in pydoc.py
- if not sys.stdout.isatty():
- pager_print(text)
- elif 'PAGER' in os.environ:
- if sys.platform == 'win32':
- pager_print(text)
- else:
- pager_pipe(text, os.environ['PAGER'])
- elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
- pager_pipe(text, 'less')
- else:
- pager_print(text)
-
-def tty_ify(text):
-
- t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
- t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
- t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
- t = _URL.sub(r"\1", t) # U(word) => word
- t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
-
- return t
-
-def get_man_text(doc):
-
- opt_indent=" "
- text = []
- text.append("> %s\n" % doc['module'].upper())
-
- desc = " ".join(doc['description'])
-
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
-
- if 'option_keys' in doc and len(doc['option_keys']) > 0:
- text.append("Options (= is mandatory):\n")
-
- for o in sorted(doc['option_keys']):
- opt = doc['options'][o]
-
- if opt.get('required', False):
- opt_leadin = "="
- else:
- opt_leadin = "-"
-
- text.append("%s %s" % (opt_leadin, o))
-
- desc = " ".join(opt['description'])
-
- if 'choices' in opt:
- choices = ", ".join(str(i) for i in opt['choices'])
- desc = desc + " (Choices: " + choices + ")"
- if 'default' in opt:
- default = str(opt['default'])
- desc = desc + " [Default: " + default + "]"
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
- subsequent_indent=opt_indent))
-
- if 'notes' in doc and len(doc['notes']) > 0:
- notes = " ".join(doc['notes'])
- text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
- subsequent_indent=opt_indent))
-
-
- if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
- req = ", ".join(doc['requirements'])
- text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
- subsequent_indent=opt_indent))
-
- if 'examples' in doc and len(doc['examples']) > 0:
- text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
- for ex in doc['examples']:
- text.append("%s\n" % (ex['code']))
-
- if 'plainexamples' in doc and doc['plainexamples'] is not None:
- text.append("EXAMPLES:")
- text.append(doc['plainexamples'])
- if 'returndocs' in doc and doc['returndocs'] is not None:
- text.append("RETURN VALUES:")
- text.append(doc['returndocs'])
- text.append('')
-
- return "\n".join(text)
-
-
-def get_snippet_text(doc):
-
- text = []
- desc = tty_ify(" ".join(doc['short_description']))
- text.append("- name: %s" % (desc))
- text.append(" action: %s" % (doc['module']))
-
- for o in sorted(doc['options'].keys()):
- opt = doc['options'][o]
- desc = tty_ify(" ".join(opt['description']))
-
- if opt.get('required', False):
- s = o + "="
- else:
- s = o
-
- text.append(" %-20s # %s" % (s, desc))
- text.append('')
-
- return "\n".join(text)
-
-def get_module_list_text(module_list):
- tty_size = 0
- if os.isatty(0):
- tty_size = struct.unpack('HHHH',
- fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
- columns = max(60, tty_size)
- displace = max(len(x) for x in module_list)
- linelimit = columns - displace - 5
- text = []
- deprecated = []
- for module in sorted(set(module_list)):
-
- if module in module_docs.BLACKLIST_MODULES:
- continue
-
- filename = utils.plugins.module_finder.find_plugin(module)
-
- if filename is None:
- continue
- if filename.endswith(".ps1"):
- continue
- if os.path.isdir(filename):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- desc = tty_ify(doc.get('short_description', '?')).strip()
- if len(desc) > linelimit:
- desc = desc[:linelimit] + '...'
-
- if module.startswith('_'): # Handle deprecated
- deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
- else:
- text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
-
- if len(deprecated) > 0:
- text.append("\nDEPRECATED:")
- text.extend(deprecated)
- return "\n".join(text)
-
-def find_modules(path, module_list):
-
- if os.path.isdir(path):
- for module in os.listdir(path):
- if module.startswith('.'):
- continue
- elif os.path.isdir(module):
- find_modules(module, module_list)
- elif any(module.endswith(x) for x in BLACKLIST_EXTS):
- continue
- elif module.startswith('__'):
- continue
- elif module in IGNORE_FILES:
- continue
- elif module.startswith('_'):
- fullpath = '/'.join([path,module])
- if os.path.islink(fullpath): # avoids aliases
- continue
-
- module = os.path.splitext(module)[0] # removes the extension
- module_list.append(module)
-
-def main():
-
- p = optparse.OptionParser(
- version=version("%prog"),
- usage='usage: %prog [options] [module...]',
- description='Show Ansible module documentation',
- )
-
- p.add_option("-M", "--module-path",
- action="store",
- dest="module_path",
- default=MODULEDIR,
- help="Ansible modules/ directory")
- p.add_option("-l", "--list",
- action="store_true",
- default=False,
- dest='list_dir',
- help='List available modules')
- p.add_option("-s", "--snippet",
- action="store_true",
- default=False,
- dest='show_snippet',
- help='Show playbook snippet for specified module(s)')
- p.add_option('-v', action='version', help='Show version number and exit')
-
- (options, args) = p.parse_args()
-
- if options.module_path is not None:
- for i in options.module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- if options.list_dir:
- # list modules
- paths = utils.plugins.module_finder._get_paths()
- module_list = []
- for path in paths:
- find_modules(path, module_list)
-
- pager(get_module_list_text(module_list))
- sys.exit()
-
- if len(args) == 0:
- p.print_help()
-
- def print_paths(finder):
- ''' Returns a string suitable for printing of the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in finder._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- text = ''
- for module in args:
-
- filename = utils.plugins.module_finder.find_plugin(module)
- if filename is None:
- sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
- continue
-
- if any(filename.endswith(x) for x in BLACKLIST_EXTS):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
- continue
-
- if doc is not None:
-
- all_keys = []
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
- all_keys = sorted(all_keys)
- doc['option_keys'] = all_keys
-
- doc['filename'] = filename
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['plainexamples'] = plainexamples
- doc['returndocs'] = returndocs
-
- if options.show_snippet:
- text += get_snippet_text(doc)
- else:
- text += get_man_text(doc)
- else:
- # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
- # probably a quoting issue.
- sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
- pager(text)
-
-if __name__ == '__main__':
- main()
diff --git a/bin/ansible-doc b/bin/ansible-doc
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-doc
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
deleted file mode 100755
index a6d625671ec..00000000000
--- a/bin/ansible-galaxy
+++ /dev/null
@@ -1,957 +0,0 @@
-#!/usr/bin/env python
-
-########################################################################
-#
-# (C) 2013, James Cammarata
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-########################################################################
-
-import datetime
-import json
-import os
-import os.path
-import shutil
-import subprocess
-import sys
-import tarfile
-import tempfile
-import urllib
-import urllib2
-import yaml
-
-from collections import defaultdict
-from distutils.version import LooseVersion
-from jinja2 import Environment
-from optparse import OptionParser
-
-import ansible.constants as C
-import ansible.utils
-from ansible.errors import AnsibleError
-
-default_meta_template = """---
-galaxy_info:
- author: {{ author }}
- description: {{description}}
- company: {{ company }}
- # If the issue tracker for your role is not on github, uncomment the
- # next line and provide a value
- # issue_tracker_url: {{ issue_tracker_url }}
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: {{ license }}
- min_ansible_version: {{ min_ansible_version }}
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- {%- for platform,versions in platforms.iteritems() %}
- #- name: {{ platform }}
- # versions:
- # - all
- {%- for version in versions %}
- # - {{ version }}
- {%- endfor %}
- {%- endfor %}
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- {%- for category in categories %}
- #- {{ category.name }}
- {%- endfor %}
-dependencies: []
- # List your role dependencies here, one per line.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
- {% for dependency in dependencies %}
- #- {{ dependency }}
- {% endfor %}
-
-"""
-
-default_readme_template = """Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
-"""
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("init", "info", "install", "list", "remove")
-SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- # - none yet
-
- # options specific to actions
- if action == "info":
- parser.set_usage("usage: %prog info [options] role_name[,version]")
- elif action == "init":
- parser.set_usage("usage: %prog init [options] role_name")
- parser.add_option(
- '-p', '--init-path', dest='init_path', default="./",
- help='The path in which the skeleton role will be created. '
- 'The default is the current working directory.')
- parser.add_option(
- '--offline', dest='offline', default=False, action='store_true',
- help="Don't query the galaxy API when creating roles")
- elif action == "install":
- parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
- parser.add_option(
- '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
- help='Ignore errors and continue with the next specified role.')
- parser.add_option(
- '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
- help='Don\'t download roles listed as dependencies')
- parser.add_option(
- '-r', '--role-file', dest='role_file',
- help='A file containing a list of roles to be imported')
- elif action == "remove":
- parser.set_usage("usage: %prog remove role1 role2 ...")
- elif action == "list":
- parser.set_usage("usage: %prog list [role_name]")
-
- # options that apply to more than one action
- if action != "init":
- parser.add_option(
- '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
- help='The path to the directory containing your roles. '
- 'The default is the roles_path configured in your '
- 'ansible.cfg file (/etc/ansible/roles if not configured)')
-
- if action in ("info","init","install"):
- parser.add_option(
- '-s', '--server', dest='api_server', default="galaxy.ansible.com",
- help='The API server destination')
-
- if action in ("init","install"):
- parser.add_option(
- '-f', '--force', dest='force', action='store_true', default=False,
- help='Force overwriting an existing role')
- # done, return the parser
- return parser
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-def exit_without_ignore(options, rc=1):
- """
- Exits with the specified return code unless the
- option --ignore-errors was specified
- """
-
- if not get_opt(options, "ignore_errors", False):
- print '- you can use --ignore-errors to skip failed roles.'
- sys.exit(rc)
-
-
-#-------------------------------------------------------------------------------------
-# Galaxy API functions
-#-------------------------------------------------------------------------------------
-
-def api_get_config(api_server):
- """
- Fetches the Galaxy API current version to ensure
- the API server is up and reachable.
- """
-
- try:
- url = 'https://%s/api/' % api_server
- data = json.load(urllib2.urlopen(url))
- if not data.get("current_version",None):
- return None
- else:
- return data
- except:
- return None
-
-def api_lookup_role_by_name(api_server, role_name, notify=True):
- """
- Uses the Galaxy API to do a lookup on the role owner/name.
- """
-
- role_name = urllib.quote(role_name)
-
- try:
- parts = role_name.split(".")
- user_name = ".".join(parts[0:-1])
- role_name = parts[-1]
- if notify:
- print "- downloading role '%s', owned by %s" % (role_name, user_name)
- except:
- parser.print_help()
- print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
- sys.exit(1)
-
- url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
- try:
- data = json.load(urllib2.urlopen(url))
- if len(data["results"]) == 0:
- return None
- else:
- return data["results"][0]
- except:
- return None
-
-def api_fetch_role_related(api_server, related, role_id):
- """
- Uses the Galaxy API to fetch the list of related items for
- the given role. The url comes from the 'related' field of
- the role.
- """
-
- try:
- url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
- data = json.load(urllib2.urlopen(url))
- results = data['results']
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- return None
-
-def api_get_list(api_server, what):
- """
- Uses the Galaxy API to fetch the list of items specified.
- """
-
- try:
- url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
- data = json.load(urllib2.urlopen(url))
- if "results" in data:
- results = data['results']
- else:
- results = data
- done = True
- if "next" in data:
- done = (data.get('next', None) == None)
- while not done:
- url = 'https://%s%s' % (api_server, data['next'])
- print url
- data = json.load(urllib2.urlopen(url))
- results += data['results']
- done = (data.get('next', None) == None)
- return results
- except:
- print "- failed to download the %s list" % what
- return None
-
-#-------------------------------------------------------------------------------------
-# scm repo utility functions
-#-------------------------------------------------------------------------------------
-
-def scm_archive_role(scm, role_url, role_version, role_name):
- if scm not in ['hg', 'git']:
- print "- scm %s is not currently supported" % scm
- return False
- tempdir = tempfile.mkdtemp()
- clone_cmd = [scm, 'clone', role_url, role_name]
- with open('/dev/null', 'w') as devnull:
- try:
- print "- executing: %s" % " ".join(clone_cmd)
- popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
- except:
- raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(clone_cmd)
- print " in directory %s" % tempdir
- return False
-
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
- if scm == 'hg':
- archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
- if role_version:
- archive_cmd.extend(['-r', role_version])
- archive_cmd.append(temp_file.name)
- if scm == 'git':
- archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
- if role_version:
- archive_cmd.append(role_version)
- else:
- archive_cmd.append('HEAD')
-
- with open('/dev/null', 'w') as devnull:
- print "- executing: %s" % " ".join(archive_cmd)
- popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
- stderr=devnull, stdout=devnull)
- rc = popen.wait()
- if rc != 0:
- print "- command %s failed" % ' '.join(archive_cmd)
- print " in directory %s" % tempdir
- return False
-
- shutil.rmtree(tempdir, ignore_errors=True)
-
- return temp_file.name
-
-
-#-------------------------------------------------------------------------------------
-# Role utility functions
-#-------------------------------------------------------------------------------------
-
-def get_role_path(role_name, options):
- """
- Returns the role path based on the roles_path option
- and the role name.
- """
- roles_path = get_opt(options,'roles_path')
- roles_path = os.path.join(roles_path, role_name)
- roles_path = os.path.expanduser(roles_path)
- return roles_path
-
-def get_role_metadata(role_name, options):
- """
- Returns the metadata as YAML, if the file 'meta/main.yml'
- exists in the specified role_path
- """
- role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
- try:
- if os.path.isfile(role_path):
- f = open(role_path, 'r')
- meta_data = yaml.safe_load(f)
- f.close()
- return meta_data
- else:
- return None
- except:
- return None
-
-def get_galaxy_install_info(role_name, options):
- """
- Returns the YAML data contained in 'meta/.galaxy_install_info',
- if it exists.
- """
-
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- if os.path.isfile(info_path):
- f = open(info_path, 'r')
- info_data = yaml.safe_load(f)
- f.close()
- return info_data
- else:
- return None
- except:
- return None
-
-def write_galaxy_install_info(role_name, role_version, options):
- """
- Writes a YAML-formatted file to the role's meta/ directory
- (named .galaxy_install_info) which contains some information
- we can use later for commands like 'list' and 'info'.
- """
-
- info = dict(
- version = role_version,
- install_date = datetime.datetime.utcnow().strftime("%c"),
- )
- try:
- info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
- f = open(info_path, 'w+')
- info_data = yaml.safe_dump(info, f)
- f.close()
- except:
- return False
- return True
-
-
-def remove_role(role_name, options):
- """
- Removes the specified role from the roles path. There is a
- sanity check to make sure there's a meta/main.yml file at this
- path so the user doesn't blow away random directories
- """
- if get_role_metadata(role_name, options):
- role_path = get_role_path(role_name, options)
- shutil.rmtree(role_path)
- return True
- else:
- return False
-
-def fetch_role(role_name, target, role_data, options):
- """
- Downloads the archived role from github to a temp location, extracts
- it, and then copies the extracted role to the role library path.
- """
-
- # first grab the file and save it to a temp location
- if '://' in role_name:
- archive_url = role_name
- else:
- archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
- print "- downloading role from %s" % archive_url
-
- try:
- url_file = urllib2.urlopen(archive_url)
- temp_file = tempfile.NamedTemporaryFile(delete=False)
- data = url_file.read()
- while data:
- temp_file.write(data)
- data = url_file.read()
- temp_file.close()
- return temp_file.name
- except Exception, e:
- # TODO: better urllib2 error handling for error
- # messages that are more exact
- print "- error: failed to download the file."
- return False
-
-def install_role(role_name, role_version, role_filename, options):
- # the file is a tar, so open it that way and extract it
- # to the specified (or default) roles directory
-
- if not tarfile.is_tarfile(role_filename):
- print "- error: the file downloaded was not a tar.gz"
- return False
- else:
- if role_filename.endswith('.gz'):
- role_tar_file = tarfile.open(role_filename, "r:gz")
- else:
- role_tar_file = tarfile.open(role_filename, "r")
- # verify the role's meta file
- meta_file = None
- members = role_tar_file.getmembers()
- # next find the metadata file
- for member in members:
- if "/meta/main.yml" in member.name:
- meta_file = member
- break
- if not meta_file:
- print "- error: this role does not appear to have a meta/main.yml file."
- return False
- else:
- try:
- meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
- except:
- print "- error: this role does not appear to have a valid meta/main.yml file."
- return False
-
- # we strip off the top-level directory for all of the files contained within
- # the tar file here, since the default is 'github_repo-target', and change it
- # to the specified role's name
- role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
- role_path = os.path.expanduser(role_path)
- print "- extracting %s to %s" % (role_name, role_path)
- try:
- if os.path.exists(role_path):
- if not os.path.isdir(role_path):
- print "- error: the specified roles path exists and is not a directory."
- return False
- elif not get_opt(options, "force", False):
- print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
- return False
- else:
- # using --force, remove the old path
- if not remove_role(role_name, options):
- print "- error: %s doesn't appear to contain a role." % role_path
- print " please remove this directory manually if you really want to put the role here."
- return False
- else:
- os.makedirs(role_path)
-
- # now we do the actual extraction to the role_path
- for member in members:
- # we only extract files, and remove any relative path
- # bits that might be in the file for security purposes
- # and drop the leading directory, as mentioned above
- if member.isreg() or member.issym():
- parts = member.name.split("/")[1:]
- final_parts = []
- for part in parts:
- if part != '..' and '~' not in part and '$' not in part:
- final_parts.append(part)
- member.name = os.path.join(*final_parts)
- role_tar_file.extract(member, role_path)
-
- # write out the install info file for later use
- write_galaxy_install_info(role_name, role_version, options)
- except OSError, e:
- print "- error: you do not have permission to modify files in %s" % role_path
- return False
-
- # return the parsed yaml metadata
- print "- %s was installed successfully" % role_name
- return meta_file_data
-
-#-------------------------------------------------------------------------------------
-# Action functions
-#-------------------------------------------------------------------------------------
-
-def execute_init(args, options, parser):
- """
- Executes the init action, which creates the skeleton framework
- of a role that complies with the galaxy metadata format.
- """
-
- init_path = get_opt(options, 'init_path', './')
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- force = get_opt(options, 'force', False)
- offline = get_opt(options, 'offline', False)
-
- if not offline:
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- try:
- role_name = args.pop(0).strip()
- if role_name == "":
- raise Exception("")
- role_path = os.path.join(init_path, role_name)
- if os.path.exists(role_path):
- if os.path.isfile(role_path):
- print "- the path %s already exists, but is a file - aborting" % role_path
- sys.exit(1)
- elif not force:
- print "- the directory %s already exists." % role_path
- print " you can use --force to re-initialize this directory,\n" + \
- " however it will reset any main.yml files that may have\n" + \
- " been modified there already."
- sys.exit(1)
- except Exception, e:
- parser.print_help()
- print "- no role name specified for init"
- sys.exit(1)
-
- ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
-
- # create the default README.md
- if not os.path.exists(role_path):
- os.makedirs(role_path)
- readme_path = os.path.join(role_path, "README.md")
- f = open(readme_path, "wb")
- f.write(default_readme_template)
- f.close
-
- for dir in ROLE_DIRS:
- dir_path = os.path.join(init_path, role_name, dir)
- main_yml_path = os.path.join(dir_path, 'main.yml')
- # create the directory if it doesn't exist already
- if not os.path.exists(dir_path):
- os.makedirs(dir_path)
-
- # now create the main.yml file for that directory
- if dir == "meta":
- # create a skeleton meta/main.yml with a valid galaxy_info
- # datastructure in place, plus with all of the available
- # tags/platforms included (but commented out) and the
- # dependencies section
- platforms = []
- if not offline:
- platforms = api_get_list(api_server, "platforms") or []
- categories = []
- if not offline:
- categories = api_get_list(api_server, "categories") or []
-
- # group the list of platforms from the api based
- # on their names, with the release field being
- # appended to a list of versions
- platform_groups = defaultdict(list)
- for platform in platforms:
- platform_groups[platform['name']].append(platform['release'])
- platform_groups[platform['name']].sort()
-
- inject = dict(
- author = 'your name',
- company = 'your company (optional)',
- license = 'license (GPLv2, CC-BY, etc)',
- issue_tracker_url = 'http://example.com/issue/tracker',
- min_ansible_version = '1.2',
- platforms = platform_groups,
- categories = categories,
- )
- rendered_meta = Environment().from_string(default_meta_template).render(inject)
- f = open(main_yml_path, 'w')
- f.write(rendered_meta)
- f.close()
- pass
- elif dir not in ('files','templates'):
- # just write a (mostly) empty YAML file for main.yml
- f = open(main_yml_path, 'w')
- f.write('---\n# %s file for %s\n' % (dir,role_name))
- f.close()
- print "- %s was created successfully" % role_name
-
-def execute_info(args, options, parser):
- """
- Executes the info action. This action prints out detailed
- information about an installed role as well as info available
- from the galaxy API.
- """
-
- if len(args) == 0:
- # the user needs to specify a role
- parser.print_help()
- print "- you must specify a user/role name"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- api_config = api_get_config(api_server)
- roles_path = get_opt(options, "roles_path")
-
- for role in args:
-
- role_info = {}
-
- install_info = get_galaxy_install_info(role, options)
- if install_info:
- if 'version' in install_info:
- install_info['intalled_version'] = install_info['version']
- del install_info['version']
- role_info.update(install_info)
-
- remote_data = api_lookup_role_by_name(api_server, role, False)
- if remote_data:
- role_info.update(remote_data)
-
- metadata = get_role_metadata(role, options)
- if metadata:
- role_info.update(metadata)
-
- role_spec = ansible.utils.role_spec_parse(role)
- if role_spec:
- role_info.update(role_spec)
-
- if role_info:
- print "- %s:" % (role)
- for k in sorted(role_info.keys()):
-
- if k in SKIP_INFO_KEYS:
- continue
-
- if isinstance(role_info[k], dict):
- print "\t%s: " % (k)
- for key in sorted(role_info[k].keys()):
- if key in SKIP_INFO_KEYS:
- continue
- print "\t\t%s: %s" % (key, role_info[k][key])
- else:
- print "\t%s: %s" % (k, role_info[k])
- else:
- print "- the role %s was not found" % role
-
-def execute_install(args, options, parser):
- """
- Executes the installation action. The args list contains the
- roles to be installed, unless -f was specified. The list of roles
- can be a name (which will be downloaded via the galaxy API and github),
- or it can be a local .tar.gz file.
- """
-
- role_file = get_opt(options, "role_file", None)
-
- if len(args) == 0 and role_file is None:
- # the user needs to specify one of either --role-file
- # or specify a single user/role name
- parser.print_help()
- print "- you must specify a user/role name or a roles file"
- sys.exit()
- elif len(args) == 1 and not role_file is None:
- # using a role file is mutually exclusive of specifying
- # the role name on the command line
- parser.print_help()
- print "- please specify a user/role name, or a roles file, but not both"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- no_deps = get_opt(options, "no_deps", False)
- roles_path = get_opt(options, "roles_path")
-
- roles_done = []
- if role_file:
- f = open(role_file, 'r')
- if role_file.endswith('.yaml') or role_file.endswith('.yml'):
- roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
- else:
- # roles listed in a file, one per line
- roles_left = map(ansible.utils.role_spec_parse, f.readlines())
- f.close()
- else:
- # roles were specified directly, so we'll just go out grab them
- # (and their dependencies, unless the user doesn't want us to).
- roles_left = map(ansible.utils.role_spec_parse, args)
-
- while len(roles_left) > 0:
- # query the galaxy API for the role data
- role_data = None
- role = roles_left.pop(0)
- role_src = role.get("src")
- role_scm = role.get("scm")
- role_path = role.get("path")
-
- if role_path:
- options.roles_path = role_path
- else:
- options.roles_path = roles_path
-
- if os.path.isfile(role_src):
- # installing a local tar.gz
- tmp_file = role_src
- else:
- if role_scm:
- # create tar file from scm url
- tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
- elif '://' in role_src:
- # just download a URL - version will probably be in the URL
- tmp_file = fetch_role(role_src, None, None, options)
- else:
- # installing from galaxy
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- role_data = api_lookup_role_by_name(api_server, role_src)
- if not role_data:
- print "- sorry, %s was not found on %s." % (role_src, api_server)
- exit_without_ignore(options)
- continue
-
- role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
- if "version" not in role or role['version'] == '':
- # convert the version names to LooseVersion objects
- # and sort them to get the latest version. If there
- # are no versions in the list, we'll grab the head
- # of the master branch
- if len(role_versions) > 0:
- loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
- loose_versions.sort()
- role["version"] = str(loose_versions[-1])
- else:
- role["version"] = 'master'
- elif role['version'] != 'master':
- if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
- print 'role is %s' % role
- print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
- exit_without_ignore(options)
- continue
-
- # download the role. if --no-deps was specified, we stop here,
- # otherwise we recursively grab roles and all of their deps.
- tmp_file = fetch_role(role_src, role["version"], role_data, options)
- installed = False
- if tmp_file:
- installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
- # we're done with the temp file, clean it up
- if tmp_file != role_src:
- os.unlink(tmp_file)
- # install dependencies, if we want them
- if not no_deps and installed:
- if not role_data:
- role_data = get_role_metadata(role.get("name"), options)
- role_dependencies = role_data['dependencies']
- else:
- role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
- for dep in role_dependencies:
- if isinstance(dep, basestring):
- dep = ansible.utils.role_spec_parse(dep)
- else:
- dep = ansible.utils.role_yaml_parse(dep)
- if not get_role_metadata(dep["name"], options):
- if dep not in roles_left:
- print '- adding dependency: %s' % dep["name"]
- roles_left.append(dep)
- else:
- print '- dependency %s already pending installation.' % dep["name"]
- else:
- print '- dependency %s is already installed, skipping.' % dep["name"]
- if not tmp_file or not installed:
- print "- %s was NOT installed successfully." % role.get("name")
- exit_without_ignore(options)
- sys.exit(0)
-
-def execute_remove(args, options, parser):
- """
- Executes the remove action. The args list contains the list
- of roles to be removed. This list can contain more than one role.
- """
-
- if len(args) == 0:
- parser.print_help()
- print '- you must specify at least one role to remove.'
- sys.exit()
-
- for role in args:
- if get_role_metadata(role, options):
- if remove_role(role, options):
- print '- successfully removed %s' % role
- else:
- print "- failed to remove role: %s" % role
- else:
- print '- %s is not installed, skipping.' % role
- sys.exit(0)
-
-def execute_list(args, options, parser):
- """
- Executes the list action. The args list can contain zero
- or one role. If one is specified, only that role will be
- shown, otherwise all roles in the specified directory will
- be shown.
- """
-
- if len(args) > 1:
- print "- please specify only one role to list, or specify no roles to see a full list"
- sys.exit(1)
-
- if len(args) == 1:
- # show only the request role, if it exists
- role_name = args[0]
- metadata = get_role_metadata(role_name, options)
- if metadata:
- install_info = get_galaxy_install_info(role_name, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- # show some more info about single roles here
- print "- %s, %s" % (role_name, version)
- else:
- print "- the role %s was not found" % role_name
- else:
- # show all valid roles in the roles_path directory
- roles_path = get_opt(options, 'roles_path')
- roles_path = os.path.expanduser(roles_path)
- if not os.path.exists(roles_path):
- parser.print_help()
- print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
- sys.exit(1)
- elif not os.path.isdir(roles_path):
- print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
- parser.print_help()
- sys.exit(1)
- path_files = os.listdir(roles_path)
- for path_file in path_files:
- if get_role_metadata(path_file, options):
- install_info = get_galaxy_install_info(path_file, options)
- version = None
- if install_info:
- version = install_info.get("version", None)
- if not version:
- version = "(unknown version)"
- print "- %s, %s" % (path_file, version)
- sys.exit(0)
-
-#-------------------------------------------------------------------------------------
-# The main entry point
-#-------------------------------------------------------------------------------------
-
-def main():
- # parse the CLI options
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- # execute the desired action
- if 1: #try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- #except KeyError, e:
- # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
- # sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-galaxy
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
deleted file mode 100755
index 3d6e1f9f402..00000000000
--- a/bin/ansible-playbook
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python
-# (C) 2012, Michael DeHaan,
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-#######################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import sys
-import os
-import stat
-
-# Augment PYTHONPATH to find Python modules relative to this file path
-# This is so that we can find the modules when running from a local checkout
-# installed as editable with `pip install -e ...` or `python setup.py develop`
-local_module_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), '..', 'lib')
-)
-sys.path.append(local_module_path)
-
-import ansible.playbook
-import ansible.constants as C
-import ansible.utils.template
-from ansible import errors
-from ansible import callbacks
-from ansible import utils
-from ansible.color import ANSIBLE_COLOR, stringc
-from ansible.callbacks import display
-
-def colorize(lead, num, color):
- """ Print 'lead' = 'num' in 'color' """
- if num != 0 and ANSIBLE_COLOR and color is not None:
- return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
- else:
- return "%s=%-4s" % (lead, str(num))
-
-def hostcolor(host, stats, color=True):
- if ANSIBLE_COLOR and color:
- if stats['failures'] != 0 or stats['unreachable'] != 0:
- return "%-37s" % stringc(host, 'red')
- elif stats['changed'] != 0:
- return "%-37s" % stringc(host, 'yellow')
- else:
- return "%-37s" % stringc(host, 'green')
- return "%-26s" % host
-
-
-def main(args):
- ''' run ansible-playbook operations '''
-
- # create parser for CLI options
- parser = utils.base_parser(
- constants=C,
- usage = "%prog playbook.yml",
- connect_opts=True,
- runas_opts=True,
- subset_opts=True,
- check_opts=True,
- diff_opts=True
- )
- #parser.add_option('--vault-password', dest="vault_password",
- # help="password for vault encrypted files")
- parser.add_option('-t', '--tags', dest='tags', default='all',
- help="only run plays and tasks tagged with these values")
- parser.add_option('--skip-tags', dest='skip_tags',
- help="only run plays and tasks whose tags do not match these values")
- parser.add_option('--syntax-check', dest='syntax', action='store_true',
- help="perform a syntax check on the playbook, but do not execute it")
- parser.add_option('--list-tasks', dest='listtasks', action='store_true',
- help="list all tasks that would be executed")
- parser.add_option('--list-tags', dest='listtags', action='store_true',
- help="list all available tags")
- parser.add_option('--step', dest='step', action='store_true',
- help="one-step-at-a-time: confirm each task before running")
- parser.add_option('--start-at-task', dest='start_at',
- help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers',
- default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
- help="run handlers even if a task fails")
- parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache")
-
- options, args = parser.parse_args(args)
-
- if len(args) == 0:
- parser.print_help(file=sys.stderr)
- return 1
-
- # privlege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- sshpass = None
- becomepass = None
- vault_pass = None
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- if options.listhosts or options.syntax or options.listtasks or options.listtags:
- (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
-
- # set pe options
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
- become_ask_pass=options.become_ask_pass,
- ask_vault_pass=options.ask_vault_pass,
- become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- only_tags = options.tags.split(",")
- skip_tags = options.skip_tags
- if options.skip_tags is not None:
- skip_tags = options.skip_tags.split(",")
-
- for playbook in args:
- if not os.path.exists(playbook):
- raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
- if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
- raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
-
- inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
-
- # Note: slightly wrong, this is written so that implicit localhost
- # (which is not returned in list_hosts()) is taken into account for
- # warning if inventory is empty. But it can't be taken into account for
- # checking if limit doesn't match any hosts. Instead we don't worry about
- # limit if only implicit localhost was in inventory to start with.
- #
- # Fix this in v2
- no_hosts = False
- if len(inventory.list_hosts()) == 0:
- # Empty inventory
- utils.warning("provided hosts list is empty, only localhost is available")
- no_hosts = True
- inventory.subset(options.subset)
- if len(inventory.list_hosts()) == 0 and no_hosts is False:
- # Invalid limit
- raise errors.AnsibleError("Specified --limit does not match any hosts")
-
- # run all playbooks specified on the command line
- for playbook in args:
-
- stats = callbacks.AggregateStats()
- playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
- if options.step:
- playbook_cb.step = options.step
- if options.start_at:
- playbook_cb.start_at = options.start_at
- runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
-
- pb = ansible.playbook.PlayBook(
- playbook=playbook,
- module_path=options.module_path,
- inventory=inventory,
- forks=options.forks,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- callbacks=playbook_cb,
- runner_callbacks=runner_cb,
- stats=stats,
- timeout=options.timeout,
- transport=options.connection,
- become=options.become,
- become_method=options.become_method,
- become_user=options.become_user,
- become_pass=becomepass,
- extra_vars=extra_vars,
- private_key_file=options.private_key_file,
- only_tags=only_tags,
- skip_tags=skip_tags,
- check=options.check,
- diff=options.diff,
- vault_password=vault_pass,
- force_handlers=options.force_handlers,
- )
-
- if options.flush_cache:
- display(callbacks.banner("FLUSHING FACT CACHE"))
- pb.SETUP_CACHE.flush()
-
- if options.listhosts or options.listtasks or options.syntax or options.listtags:
- print ''
- print 'playbook: %s' % playbook
- print ''
- playnum = 0
- for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
- playnum += 1
- play = ansible.playbook.Play(pb, play_ds, play_basedir,
- vault_password=pb.vault_password)
- label = play.name
- hosts = pb.inventory.list_hosts(play.hosts)
-
- if options.listhosts:
- print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
- for host in hosts:
- print ' %s' % host
-
- if options.listtags or options.listtasks:
- print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
-
- if options.listtags:
- tags = []
- for task in pb.tasks_to_run_in_play(play):
- tags.extend(task.tags)
- print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
-
- if options.listtasks:
-
- for task in pb.tasks_to_run_in_play(play):
- if getattr(task, 'name', None) is not None:
- # meta tasks have no names
- print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
-
- if options.listhosts or options.listtasks or options.listtags:
- print ''
- continue
-
- if options.syntax:
- # if we've not exited by now then we are fine.
- print 'Playbook Syntax is fine'
- return 0
-
- failed_hosts = []
- unreachable_hosts = []
-
- try:
-
- pb.run()
-
- hosts = sorted(pb.stats.processed.keys())
- display(callbacks.banner("PLAY RECAP"))
- playbook_cb.on_stats(pb.stats)
-
- for h in hosts:
- t = pb.stats.summarize(h)
- if t['failures'] > 0:
- failed_hosts.append(h)
- if t['unreachable'] > 0:
- unreachable_hosts.append(h)
-
- retries = failed_hosts + unreachable_hosts
-
- if C.RETRY_FILES_ENABLED and len(retries) > 0:
- filename = pb.generate_retry_inventory(retries)
- if filename:
- display(" to retry, use: --limit @%s\n" % filename)
-
- for h in hosts:
- t = pb.stats.summarize(h)
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
-
- print ""
- if len(failed_hosts) > 0:
- return 2
- if len(unreachable_hosts) > 0:
- return 3
-
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red')
- return 1
-
- return 0
-
-
-if __name__ == "__main__":
- display(" ", log_only=True)
- display(" ".join(sys.argv), log_only=True)
- display(" ", log_only=True)
- try:
- sys.exit(main(sys.argv[1:]))
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red', stderr=True)
- sys.exit(1)
- except KeyboardInterrupt, ke:
- display("ERROR: interrupted", color='red', stderr=True)
- sys.exit(1)
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-playbook
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-pull b/bin/ansible-pull
deleted file mode 100755
index d4887631e0f..00000000000
--- a/bin/ansible-pull
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Stephen Fromm
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-
-# usage:
-# ansible-pull -d /var/lib/ansible \
-# -U http://example.net/content.git [-C production] \
-# [path/playbook.yml]
-#
-# the -d and -U arguments are required; the -C argument is optional.
-#
-# ansible-pull accepts an optional argument to specify a playbook
-# location underneath the workdir and then searches the source repo
-# for playbooks in the following order, stopping at the first match:
-#
-# 1. $workdir/path/playbook.yml, if specified
-# 2. $workdir/$fqdn.yml
-# 3. $workdir/$hostname.yml
-# 4. $workdir/local.yml
-#
-# the source repo must contain at least one of these playbooks.
-
-import os
-import shutil
-import sys
-import datetime
-import socket
-import random
-import time
-from ansible import utils
-from ansible.utils import cmd_functions
-from ansible import errors
-from ansible import inventory
-
-DEFAULT_REPO_TYPE = 'git'
-DEFAULT_PLAYBOOK = 'local.yml'
-PLAYBOOK_ERRORS = {1: 'File does not exist',
- 2: 'File is not readable'}
-
-VERBOSITY=0
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def try_playbook(path):
- if not os.path.exists(path):
- return 1
- if not os.access(path, os.R_OK):
- return 2
- return 0
-
-
-def select_playbook(path, args):
- playbook = None
- if len(args) > 0 and args[0] is not None:
- playbook = "%s/%s" % (path, args[0])
- rc = try_playbook(playbook)
- if rc != 0:
- print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
- return None
- return playbook
- else:
- fqdn = socket.getfqdn()
- hostpb = "%s/%s.yml" % (path, fqdn)
- shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
- localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
- errors = []
- for pb in [hostpb, shorthostpb, localpb]:
- rc = try_playbook(pb)
- if rc == 0:
- playbook = pb
- break
- else:
- errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
- if playbook is None:
- print >>sys.stderr, "\n".join(errors)
- return playbook
-
-
-def main(args):
- """ Set up and run a local playbook """
- usage = "%prog [options] [playbook.yml]"
- parser = utils.SortedOptParser(usage=usage)
- parser.add_option('--purge', default=False, action='store_true',
- help='purge checkout after playbook run')
- parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
- help='only run the playbook if the repository has been updated')
- parser.add_option('-s', '--sleep', dest='sleep', default=None,
- help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests')
- parser.add_option('-f', '--force', dest='force', default=False,
- action='store_true',
- help='run the playbook even if the repository could '
- 'not be updated')
- parser.add_option('-d', '--directory', dest='dest', default=None,
- help='directory to checkout repository to')
- #parser.add_option('-l', '--live', default=True, action='store_live',
- # help='Print the ansible-playbook output while running')
- parser.add_option('-U', '--url', dest='url', default=None,
- help='URL of the playbook repository')
- parser.add_option('-C', '--checkout', dest='checkout',
- help='branch/tag/commit to checkout. '
- 'Defaults to behavior of repository module.')
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="location of the inventory host file")
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-v', '--verbose', default=False, action="callback",
- callback=increment_debug,
- help='Pass -vvvv to ansible-playbook')
- parser.add_option('-m', '--module-name', dest='module_name',
- default=DEFAULT_REPO_TYPE,
- help='Module name used to check out repository. '
- 'Default is %s.' % DEFAULT_REPO_TYPE)
- parser.add_option('--vault-password-file', dest='vault_password_file',
- help="vault password file")
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('-t', '--tags', dest='tags', default=False,
- help='only run plays and tasks tagged with these values')
- parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
- help='adds the hostkey for the repo url if not already added')
- parser.add_option('--key-file', dest='key_file',
- help="Pass '-i ' to the SSH arguments used by git.")
- options, args = parser.parse_args(args)
-
- hostname = socket.getfqdn()
- if not options.dest:
- # use a hostname dependent directory, in case of $HOME on nfs
- options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
-
- options.dest = os.path.abspath(options.dest)
-
- if not options.url:
- parser.error("URL for repository not specified, use -h for help")
- return 1
-
- now = datetime.datetime.now()
- print now.strftime("Starting ansible-pull at %F %T")
-
- # Attempt to use the inventory passed in as an argument
- # It might not yet have been downloaded so use localhost if note
- if not options.inventory or not os.path.exists(options.inventory):
- inv_opts = 'localhost,'
- else:
- inv_opts = options.inventory
- limit_opts = 'localhost:%s:127.0.0.1' % hostname
- repo_opts = "name=%s dest=%s" % (options.url, options.dest)
-
- if VERBOSITY == 0:
- base_opts = '-c local --limit "%s"' % limit_opts
- elif VERBOSITY > 0:
- debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
- base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
-
- if options.checkout:
- repo_opts += ' version=%s' % options.checkout
-
- # Only git module is supported
- if options.module_name == DEFAULT_REPO_TYPE:
- if options.accept_host_key:
- repo_opts += ' accept_hostkey=yes'
-
- if options.key_file:
- repo_opts += ' key_file=%s' % options.key_file
-
- path = utils.plugins.module_finder.find_plugin(options.module_name)
- if path is None:
- sys.stderr.write("module '%s' not found.\n" % options.module_name)
- return 1
-
- bin_path = os.path.dirname(os.path.abspath(__file__))
- cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
- bin_path, inv_opts, base_opts, options.module_name, repo_opts
- )
-
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
-
- if options.sleep:
- try:
- secs = random.randint(0,int(options.sleep));
- except ValueError:
- parser.error("%s is not a number." % options.sleep)
- return 1
-
- print >>sys.stderr, "Sleeping for %d seconds..." % secs
- time.sleep(secs);
-
-
- # RUN THe CHECKOUT COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if rc != 0:
- if options.force:
- print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
- else:
- return rc
- elif options.ifchanged and '"changed": true' not in out:
- print "Repository has not changed, quitting."
- return 0
-
- playbook = select_playbook(options.dest, args)
-
- if playbook is None:
- print >>sys.stderr, "Could not find a playbook to run."
- return 1
-
- cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
- if options.vault_password_file:
- cmd += " --vault-password-file=%s" % options.vault_password_file
- if options.inventory:
- cmd += ' -i "%s"' % options.inventory
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
- if options.ask_sudo_pass:
- cmd += ' -K'
- if options.tags:
- cmd += ' -t "%s"' % options.tags
- os.chdir(options.dest)
-
- # RUN THE PLAYBOOK COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if options.purge:
- os.chdir('/')
- try:
- shutil.rmtree(options.dest)
- except Exception, e:
- print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
-
- return rc
-
-if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt, e:
- print >>sys.stderr, "Exit on user request.\n"
- sys.exit(1)
diff --git a/bin/ansible-pull b/bin/ansible-pull
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-pull
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/bin/ansible-vault b/bin/ansible-vault
deleted file mode 100755
index 22cfc0e1487..00000000000
--- a/bin/ansible-vault
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, James Tanner
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-# ansible-vault is a script that encrypts/decrypts YAML files. See
-# http://docs.ansible.com/playbooks_vault.html for more details.
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-import traceback
-
-import ansible.constants as C
-
-from ansible import utils
-from ansible import errors
-from ansible.utils.vault import VaultEditor
-
-from optparse import OptionParser
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
- parser.add_option('--debug', dest='debug', action="store_true", help="debug")
- parser.add_option('--vault-password-file', dest='password_file',
- help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
-
- # options specific to actions
- if action == "create":
- parser.set_usage("usage: %prog create [options] file_name")
- elif action == "decrypt":
- parser.set_usage("usage: %prog decrypt [options] file_name")
- elif action == "edit":
- parser.set_usage("usage: %prog edit [options] file_name")
- elif action == "view":
- parser.set_usage("usage: %prog view [options] file_name")
- elif action == "encrypt":
- parser.set_usage("usage: %prog encrypt [options] file_name")
- elif action == "rekey":
- parser.set_usage("usage: %prog rekey [options] file_name")
-
- # done, return the parser
- return parser
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-#-------------------------------------------------------------------------------------
-# Command functions
-#-------------------------------------------------------------------------------------
-
-def execute_create(args, options, parser):
- if len(args) > 1:
- raise errors.AnsibleError("'create' does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- this_editor = VaultEditor(cipher, password, args[0])
- this_editor.create_file()
-
-def execute_decrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.decrypt_file()
-
- print "Decryption successful"
-
-def execute_edit(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("edit does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.edit_file()
-
-def execute_view(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("view does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.view_file()
-
-def execute_encrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.encrypt_file()
-
- print "Encryption successful"
-
-def execute_rekey(args, options, parser):
-
- if not options.password_file:
- password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
-
- cipher = None
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.rekey_file(new_password)
-
- print "Rekey successful"
-
-#-------------------------------------------------------------------------------------
-# MAIN
-#-------------------------------------------------------------------------------------
-
-def main():
-
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- if not len(args):
- raise errors.AnsibleError(
- "The '%s' command requires a filename as the first argument" % action
- )
-
- # execute the desired action
- try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- except Exception, err:
- if options.debug:
- print traceback.format_exc()
- print "ERROR:",err
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/bin/ansible-vault b/bin/ansible-vault
new file mode 120000
index 00000000000..cabb1f519aa
--- /dev/null
+++ b/bin/ansible-vault
@@ -0,0 +1 @@
+ansible
\ No newline at end of file
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index ba5ca83b723..8637adb54d6 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -14,5 +14,9 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-__version__ = '2.0.0'
-__author__ = 'Michael DeHaan'
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__version__ = '2.0'
diff --git a/v2/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
similarity index 100%
rename from v2/ansible/cli/__init__.py
rename to lib/ansible/cli/__init__.py
diff --git a/v2/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
similarity index 100%
rename from v2/ansible/cli/adhoc.py
rename to lib/ansible/cli/adhoc.py
diff --git a/v2/ansible/cli/doc.py b/lib/ansible/cli/doc.py
similarity index 100%
rename from v2/ansible/cli/doc.py
rename to lib/ansible/cli/doc.py
diff --git a/v2/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
similarity index 100%
rename from v2/ansible/cli/galaxy.py
rename to lib/ansible/cli/galaxy.py
diff --git a/v2/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
similarity index 100%
rename from v2/ansible/cli/playbook.py
rename to lib/ansible/cli/playbook.py
diff --git a/v2/ansible/cli/pull.py b/lib/ansible/cli/pull.py
similarity index 100%
rename from v2/ansible/cli/pull.py
rename to lib/ansible/cli/pull.py
diff --git a/v2/ansible/cli/vault.py b/lib/ansible/cli/vault.py
similarity index 100%
rename from v2/ansible/cli/vault.py
rename to lib/ansible/cli/vault.py
diff --git a/v2/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py
similarity index 100%
rename from v2/ansible/compat/__init__.py
rename to lib/ansible/compat/__init__.py
diff --git a/v2/ansible/compat/tests/__init__.py b/lib/ansible/compat/tests/__init__.py
similarity index 100%
rename from v2/ansible/compat/tests/__init__.py
rename to lib/ansible/compat/tests/__init__.py
diff --git a/v2/ansible/compat/tests/mock.py b/lib/ansible/compat/tests/mock.py
similarity index 100%
rename from v2/ansible/compat/tests/mock.py
rename to lib/ansible/compat/tests/mock.py
diff --git a/v2/ansible/compat/tests/unittest.py b/lib/ansible/compat/tests/unittest.py
similarity index 100%
rename from v2/ansible/compat/tests/unittest.py
rename to lib/ansible/compat/tests/unittest.py
diff --git a/v2/ansible/config/__init__.py b/lib/ansible/config/__init__.py
similarity index 100%
rename from v2/ansible/config/__init__.py
rename to lib/ansible/config/__init__.py
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 089de5b7c5b..456beb8bbc4 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -15,10 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import os
import pwd
import sys
-import ConfigParser
+
+from six.moves import configparser
from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :)
@@ -35,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
- return mk_boolean(value)
- if value and integer:
- return int(value)
- if value and floating:
- return float(value)
- if value and islist:
- return [x.strip() for x in value.split(',')]
+ value = mk_boolean(value)
+ if value:
+ if integer:
+ value = int(value)
+ elif floating:
+ value = float(value)
+ elif islist:
+ if isinstance(value, basestring):
+ value = [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
@@ -60,7 +67,7 @@ def _get_config(p, section, key, env_var, default):
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
- p = ConfigParser.ConfigParser()
+ p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
@@ -73,8 +80,8 @@ def load_config_file():
if path is not None and os.path.exists(path):
try:
p.read(path)
- except ConfigParser.Error as e:
- print "Error reading config file: \n%s" % e
+ except configparser.Error as e:
+ print("Error reading config file: \n{0}".format(e))
sys.exit(1)
return p
return None
@@ -98,7 +105,8 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
DEFAULTS='defaults'
# configurable things
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
+DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
+DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@@ -112,6 +120,7 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@@ -122,7 +131,6 @@ DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None,
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@@ -141,7 +149,7 @@ BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2
DEFAULT_BECOME_EXE = None
@@ -156,6 +164,7 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
+DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
@@ -173,8 +182,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
-
+RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
+RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@@ -196,10 +205,16 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
+# galaxy related
+DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
+# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
+GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
+
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
+MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
diff --git a/v2/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
similarity index 100%
rename from v2/ansible/errors/__init__.py
rename to lib/ansible/errors/__init__.py
diff --git a/v2/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
similarity index 100%
rename from v2/ansible/errors/yaml_strings.py
rename to lib/ansible/errors/yaml_strings.py
diff --git a/v2/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
similarity index 100%
rename from v2/ansible/executor/__init__.py
rename to lib/ansible/executor/__init__.py
diff --git a/v2/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py
similarity index 100%
rename from v2/ansible/executor/connection_info.py
rename to lib/ansible/executor/connection_info.py
diff --git a/v2/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
similarity index 100%
rename from v2/ansible/executor/module_common.py
rename to lib/ansible/executor/module_common.py
diff --git a/v2/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
similarity index 100%
rename from v2/ansible/executor/play_iterator.py
rename to lib/ansible/executor/play_iterator.py
diff --git a/v2/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
similarity index 100%
rename from v2/ansible/executor/playbook_executor.py
rename to lib/ansible/executor/playbook_executor.py
diff --git a/v2/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py
similarity index 100%
rename from v2/ansible/executor/process/__init__.py
rename to lib/ansible/executor/process/__init__.py
diff --git a/v2/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py
similarity index 100%
rename from v2/ansible/executor/process/result.py
rename to lib/ansible/executor/process/result.py
diff --git a/v2/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
similarity index 100%
rename from v2/ansible/executor/process/worker.py
rename to lib/ansible/executor/process/worker.py
diff --git a/v2/ansible/executor/stats.py b/lib/ansible/executor/stats.py
similarity index 100%
rename from v2/ansible/executor/stats.py
rename to lib/ansible/executor/stats.py
diff --git a/v2/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
similarity index 100%
rename from v2/ansible/executor/task_executor.py
rename to lib/ansible/executor/task_executor.py
diff --git a/v2/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
similarity index 100%
rename from v2/ansible/executor/task_queue_manager.py
rename to lib/ansible/executor/task_queue_manager.py
diff --git a/v2/ansible/executor/task_queue_manager.py: b/lib/ansible/executor/task_queue_manager.py:
similarity index 100%
rename from v2/ansible/executor/task_queue_manager.py:
rename to lib/ansible/executor/task_queue_manager.py:
diff --git a/v2/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
similarity index 100%
rename from v2/ansible/executor/task_result.py
rename to lib/ansible/executor/task_result.py
diff --git a/v2/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
similarity index 100%
rename from v2/ansible/galaxy/__init__.py
rename to lib/ansible/galaxy/__init__.py
diff --git a/v2/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
similarity index 100%
rename from v2/ansible/galaxy/api.py
rename to lib/ansible/galaxy/api.py
diff --git a/v2/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2
similarity index 100%
rename from v2/ansible/galaxy/data/metadata_template.j2
rename to lib/ansible/galaxy/data/metadata_template.j2
diff --git a/v2/ansible/galaxy/data/readme b/lib/ansible/galaxy/data/readme
similarity index 100%
rename from v2/ansible/galaxy/data/readme
rename to lib/ansible/galaxy/data/readme
diff --git a/v2/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
similarity index 100%
rename from v2/ansible/galaxy/role.py
rename to lib/ansible/galaxy/role.py
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index 2048046d3c1..063398f17f9 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -16,36 +16,44 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import fnmatch
import os
import sys
import re
+import stat
import subprocess
-import ansible.constants as C
+from ansible import constants as C
+from ansible.errors import *
+
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible import errors
-from ansible import utils
+from ansible.plugins import vars_loader
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class Inventory(object):
"""
Host inventory for ansible.
"""
- __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+ #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+ # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+ # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
- def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
+ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
- self._vault_password=vault_password
+ self._loader = loader
+ self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@@ -97,7 +105,7 @@ class Inventory(object):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(filename=host_list)
+ self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
else:
# check to see if the specified file starts with a
@@ -113,9 +121,9 @@ class Inventory(object):
except:
pass
- if utils.is_executable(host_list):
+ if is_executable(host_list):
try:
- self.parser = InventoryScript(filename=host_list)
+ self.parser = InventoryScript(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
except:
if not shebang_present:
@@ -134,19 +142,23 @@ class Inventory(object):
else:
raise
- utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
+ vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
- self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ # FIXME: shouldn't be required, since the group/host vars file
+ # management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
+ # FIXME: combine_vars
+ group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
+ # FIXME: combine_vars
+ host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str):
@@ -192,9 +204,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
- hosts = [ h for h in hosts if h.name in self._restriction ]
+ hosts = [ h for h in hosts if h in self._restriction ]
if self._also_restriction is not None:
- hosts = [ h for h in hosts if h.name in self._also_restriction ]
+ hosts = [ h for h in hosts if h in self._also_restriction ]
return hosts
@@ -320,6 +332,8 @@ class Inventory(object):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
+ new_host.ipv4_address = '127.0.0.1'
+
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
@@ -420,7 +434,7 @@ class Inventory(object):
group = self.get_group(groupname)
if group is None:
- raise errors.AnsibleError("group not found: %s" % groupname)
+ raise Exception("group not found: %s" % groupname)
vars = {}
@@ -428,19 +442,21 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# Read group_vars/ files
- vars = utils.combine_vars(vars, self.get_group_vars(group))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.get_group_vars(group))
return vars
- def get_variables(self, hostname, update_cached=False, vault_password=None):
+ def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
- raise errors.AnsibleError("host not found: %s" % hostname)
- return host.get_variables()
+ raise Exception("host not found: %s" % hostname)
+ return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -460,22 +476,26 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
- vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
- vars = utils.combine_vars(vars, self.get_host_vars(host))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.get_host_vars(host))
return vars
@@ -490,7 +510,7 @@ class Inventory(object):
""" return a list of hostnames for a pattern """
- result = [ h.name for h in self.get_hosts(pattern) ]
+ result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
result = [pattern]
return result
@@ -498,11 +518,7 @@ class Inventory(object):
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
- # TODO: remove this function
- def get_restriction(self):
- return self._restriction
-
- def restrict_to(self, restriction):
+ def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
@@ -544,7 +560,7 @@ class Inventory(object):
results.append(x)
self._subset = results
- def lift_restriction(self):
+ def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
@@ -588,10 +604,12 @@ class Inventory(object):
self._playbook_basedir = dir
# get group vars from group_vars/ files
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ # FIXME: combine_vars
+ group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
# get host vars from host_vars/ files
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ # FIXME: combine_vars
+ host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
@@ -639,15 +657,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
+ # FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
+ self._variable_manager.add_group_vars_file(base_path, self._loader)
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
+ self._variable_manager.add_host_vars_file(base_path, self._loader)
# all done, results is a dictionary of variables for this particular host.
return results
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index 9ac23fff899..735f32d62c3 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -17,20 +17,25 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
-import ansible.constants as C
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
-from ansible import utils
-from ansible import errors
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
@@ -38,10 +43,12 @@ class InventoryDirectory(object):
self.hosts = {}
self.groups = {}
+ self._loader = loader
+
for i in self.names:
# Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
+ if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
@@ -51,9 +58,9 @@ class InventoryDirectory(object):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
- parser = InventoryDirectory(filename=fullpath)
- elif utils.is_executable(fullpath):
- parser = InventoryScript(filename=fullpath)
+ parser = InventoryDirectory(loader=loader, filename=fullpath)
+ elif is_executable(fullpath):
+ parser = InventoryScript(loader=loader, filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
@@ -153,7 +160,7 @@ class InventoryDirectory(object):
# name
if group.name != newgroup.name:
- raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+ raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
@@ -196,14 +203,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group)
# variables
- group.vars = utils.combine_vars(group.vars, newgroup.vars)
+ group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
- raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+ raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
@@ -218,7 +225,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host)
# variables
- host.vars = utils.combine_vars(host.vars, newhost.vars)
+ host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py
index f1297409355..b5a957c53fe 100644
--- a/lib/ansible/inventory/expand_hosts.py
+++ b/lib/ansible/inventory/expand_hosts.py
@@ -30,6 +30,9 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import string
from ansible import errors
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
index 262558e69c8..6525e69b466 100644
--- a/lib/ansible/inventory/group.py
+++ b/lib/ansible/inventory/group.py
@@ -14,11 +14,15 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Group(object):
+from ansible.utils.debug import debug
+
+class Group:
''' a group of ansible hosts '''
- __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+ #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
@@ -29,9 +33,49 @@ class Group(object):
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
+
#self.clear_hosts_cache()
- if self.name is None:
- raise Exception("group name is required")
+ #if self.name is None:
+ # raise Exception("group name is required")
+
+ def __repr__(self):
+ return self.get_name()
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def serialize(self):
+ parent_groups = []
+ for parent in self.parent_groups:
+ parent_groups.append(parent.serialize())
+
+ result = dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ parent_groups=parent_groups,
+ depth=self.depth,
+ )
+
+ debug("serializing group, result is: %s" % result)
+ return result
+
+ def deserialize(self, data):
+ debug("deserializing group, data is: %s" % data)
+ self.__init__()
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+
+ parent_groups = data.get('parent_groups', [])
+ for parent_data in parent_groups:
+ g = Group()
+ g.deserialize(parent_data)
+ self.parent_groups.append(g)
+
+ def get_name(self):
+ return self.name
def add_child_group(self, group):
@@ -100,7 +144,7 @@ class Group(object):
hosts.append(mine)
return hosts
- def get_variables(self):
+ def get_vars(self):
return self.vars.copy()
def _get_ancestors(self):
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index d4dc20fa462..29d6afd9912 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -15,24 +15,88 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import ansible.constants as C
-from ansible import utils
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Host(object):
+from ansible import constants as C
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+__all__ = ['Host']
+
+class Host:
''' a single ansible host '''
- __slots__ = [ 'name', 'vars', 'groups' ]
+ #__slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def serialize(self):
+ groups = []
+ for group in self.groups:
+ groups.append(group.serialize())
+
+ return dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ ipv4_address=self.ipv4_address,
+ ipv6_address=self.ipv6_address,
+ port=self.port,
+ gathered_facts=self._gathered_facts,
+ groups=groups,
+ )
+
+ def deserialize(self, data):
+ self.__init__()
+
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.ipv4_address = data.get('ipv4_address', '')
+ self.ipv6_address = data.get('ipv6_address', '')
+ self.port = data.get('port')
+
+ groups = data.get('groups', [])
+ for group_data in groups:
+ g = Group()
+ g.deserialize(group_data)
+ self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
- if port and port != C.DEFAULT_REMOTE_PORT:
- self.set_variable('ansible_ssh_port', int(port))
- if self.name is None:
- raise Exception("host name is required")
+ self.ipv4_address = name
+ self.ipv6_address = name
+
+ if port and port != C.DEFAULT_REMOTE_PORT:
+ self.port = int(port)
+ else:
+ self.port = C.DEFAULT_REMOTE_PORT
+
+ self._gathered_facts = False
+
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self):
+ return self.name
+
+ @property
+ def gathered_facts(self):
+ return self._gathered_facts
+
+ def set_gathered_facts(self, gathered):
+ self._gathered_facts = gathered
def add_group(self, group):
@@ -52,16 +116,15 @@ class Host(object):
groups[a.name] = a
return groups.values()
- def get_variables(self):
+ def get_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
- results = utils.combine_vars(results, group.get_variables())
- results = utils.combine_vars(results, self.vars)
+ results = combine_vars(results, group.get_vars())
+ results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
-
diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index bd9a98e7f86..e004ee8bb75 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -16,17 +16,20 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.constants as C
+import ast
+import shlex
+import re
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible import errors
-from ansible import utils
-import shlex
-import re
-import ast
+from ansible.utils.unicode import to_unicode
class InventoryParser(object):
"""
@@ -34,9 +37,8 @@ class InventoryParser(object):
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
+ self.filename = filename
with open(filename) as fh:
- self.filename = filename
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
@@ -54,10 +56,7 @@ class InventoryParser(object):
def _parse_value(v):
if "#" not in v:
try:
- ret = ast.literal_eval(v)
- if not isinstance(ret, float):
- # Do not trim floats. Eg: "1.20" to 1.2
- return ret
+ v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it.
except ValueError:
@@ -66,7 +65,7 @@ class InventoryParser(object):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return v
+ return to_unicode(v, nonstring='passthru', errors='strict')
# [webservers]
# alpha
@@ -91,8 +90,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
- for lineno in range(len(self.lines)):
- line = utils.before_comment(self.lines[lineno]).strip()
+ for line in self.lines:
+ line = self._before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
@@ -146,8 +145,11 @@ class InventoryParser(object):
try:
(k,v) = t.split("=", 1)
except ValueError, e:
- raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
- host.set_variable(k, self._parse_value(v))
+ raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
+ if k == 'ansible_ssh_host':
+ host.ipv4_address = self._parse_value(v)
+ else:
+ host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
@@ -157,8 +159,8 @@ class InventoryParser(object):
def _parse_group_children(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
@@ -173,7 +175,7 @@ class InventoryParser(object):
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
- raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
+ raise AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
@@ -184,13 +186,13 @@ class InventoryParser(object):
def _parse_group_variables(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
- raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
+ raise AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
@@ -199,10 +201,18 @@ class InventoryParser(object):
pass
elif group:
if "=" not in line:
- raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
+ raise AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
+
+ def _before_comment(self, msg):
+        ''' Return the part of msg that precedes any comment marker (#). '''
+ msg = msg.replace("\#","**NOT_A_COMMENT**")
+ msg = msg.split("#")[0]
+ msg = msg.replace("**NOT_A_COMMENT**","#")
+ return msg
+
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index b83cb9bcc7a..9675d70f690 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -16,22 +16,26 @@
# along with Ansible. If not, see .
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import subprocess
-import ansible.constants as C
+import sys
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible import utils
-from ansible import errors
-import sys
-class InventoryScript(object):
+class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+
+ self._loader = loader
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -41,11 +45,11 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
- raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
+ raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
@@ -58,7 +62,7 @@ class InventoryScript(object):
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
- self.raw = utils.parse_json(self.data)
+ self.raw = self._loader.load(self.data)
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
@@ -68,7 +72,7 @@ class InventoryScript(object):
if 'failed' in self.raw:
sys.stderr.write(err + "\n")
- raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
+ raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
for (group_name, data) in self.raw.items():
@@ -92,12 +96,12 @@ class InventoryScript(object):
if not isinstance(data, dict):
data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars','children')):
+ elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
@@ -108,7 +112,7 @@ class InventoryScript(object):
if 'vars' in data:
if not isinstance(data['vars'], dict):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
@@ -143,12 +147,12 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
- return json_dict_bytes_to_unicode(utils.parse_json(out))
+ return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
- raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
+ raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/inventory/vars_plugins/noop.py b/lib/ansible/inventory/vars_plugins/noop.py
index 5d4b4b6658c..8f0c98cad56 100644
--- a/lib/ansible/inventory/vars_plugins/noop.py
+++ b/lib/ansible/inventory/vars_plugins/noop.py
@@ -15,6 +15,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
class VarsModule(object):
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 54a1a9cfff7..8f9b03f882d 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -43,7 +43,7 @@ BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# can be inserted in any module source automatically by including
# #<> on a blank line by itself inside
# of an ansible module. The source of this common code lives
-# in lib/ansible/module_common.py
+# in ansible/executor/module_common.py
import locale
import os
@@ -65,6 +65,7 @@ import pwd
import platform
import errno
import tempfile
+from itertools import imap, repeat
try:
import json
@@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d):
+def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d):
'''
if isinstance(d, unicode):
- return d.encode('utf-8')
+ return d.encode(encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
+ return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
+ return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
+ return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
-def json_dict_bytes_to_unicode(d):
+def json_dict_bytes_to_unicode(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -260,13 +261,13 @@ def json_dict_bytes_to_unicode(d):
'''
if isinstance(d, str):
- return unicode(d, 'utf-8')
+ return unicode(d, encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
+ return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_bytes_to_unicode, d))
+ return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_bytes_to_unicode, d))
+ return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
@@ -359,9 +360,9 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale
self._check_locale()
- (self.params, self.args) = self._load_params()
+ self.params = self._load_params()
- self._legal_inputs = ['CHECKMODE', 'NO_LOG']
+ self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
self.aliases = self._handle_aliases()
@@ -888,7 +889,7 @@ class AnsibleModule(object):
def _check_for_check_mode(self):
for (k,v) in self.params.iteritems():
- if k == 'CHECKMODE':
+ if k == '_ansible_check_mode':
if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode")
if self.supports_check_mode:
@@ -896,13 +897,13 @@ class AnsibleModule(object):
def _check_for_no_log(self):
for (k,v) in self.params.iteritems():
- if k == 'NO_LOG':
+ if k == '_ansible_no_log':
self.no_log = self.boolean(v)
def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems():
# these should be in legal inputs already
- #if k in ('CHECKMODE', 'NO_LOG'):
+ #if k in ('_ansible_check_mode', '_ansible_no_log'):
# continue
if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -1075,20 +1076,11 @@ class AnsibleModule(object):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
- args = MODULE_ARGS
- items = shlex.split(args)
- params = {}
- for x in items:
- try:
- (k, v) = x.split("=",1)
- except Exception, e:
- self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
- if k in params:
- self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
- params[k] = v
- params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- params2.update(params)
- return (params2, args)
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
+ if params is None:
+ params = dict()
+ return params
+
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -1209,13 +1201,17 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
+ for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using simplejson module does not support encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
+            # Old systems with an old simplejson module do not support the encoding keyword.
+ except TypeError:
+ try:
+ new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data)
+ except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
@@ -1452,7 +1448,7 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporart env path if a prefix is passed
+ # Set a temporary env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1
index ee7d3ddeca4..57d2c1b101c 100644
--- a/lib/ansible/module_utils/powershell.ps1
+++ b/lib/ansible/module_utils/powershell.ps1
@@ -142,14 +142,14 @@ Function ConvertTo-Bool
return
}
-# Helper function to calculate a hash of a file in a way which powershell 3
+# Helper function to calculate the MD5 hash of a file in a way which PowerShell 3
# and above can handle:
-Function Get-FileChecksum($path)
+Function Get-FileMd5($path)
{
$hash = ""
If (Test-Path -PathType Leaf $path)
{
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
index e69de29bb2d..ae8ccff5952 100644
--- a/lib/ansible/modules/__init__.py
+++ b/lib/ansible/modules/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
deleted file mode 160000
index 9028e9d4be8..00000000000
--- a/lib/ansible/modules/core
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
deleted file mode 160000
index dd80fa221ce..00000000000
--- a/lib/ansible/modules/extras
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc
diff --git a/v2/ansible/new_inventory/__init__.py b/lib/ansible/new_inventory/__init__.py
similarity index 100%
rename from v2/ansible/new_inventory/__init__.py
rename to lib/ansible/new_inventory/__init__.py
diff --git a/v2/ansible/new_inventory/group.py b/lib/ansible/new_inventory/group.py
similarity index 100%
rename from v2/ansible/new_inventory/group.py
rename to lib/ansible/new_inventory/group.py
diff --git a/v2/ansible/new_inventory/host.py b/lib/ansible/new_inventory/host.py
similarity index 100%
rename from v2/ansible/new_inventory/host.py
rename to lib/ansible/new_inventory/host.py
diff --git a/v2/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
similarity index 100%
rename from v2/ansible/parsing/__init__.py
rename to lib/ansible/parsing/__init__.py
diff --git a/v2/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
similarity index 100%
rename from v2/ansible/parsing/mod_args.py
rename to lib/ansible/parsing/mod_args.py
diff --git a/v2/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
similarity index 100%
rename from v2/ansible/parsing/splitter.py
rename to lib/ansible/parsing/splitter.py
diff --git a/v2/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py
similarity index 100%
rename from v2/ansible/parsing/utils/__init__.py
rename to lib/ansible/parsing/utils/__init__.py
diff --git a/v2/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
similarity index 100%
rename from v2/ansible/parsing/utils/jsonify.py
rename to lib/ansible/parsing/utils/jsonify.py
diff --git a/v2/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
similarity index 100%
rename from v2/ansible/parsing/vault/__init__.py
rename to lib/ansible/parsing/vault/__init__.py
diff --git a/v2/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
similarity index 100%
rename from v2/ansible/parsing/yaml/__init__.py
rename to lib/ansible/parsing/yaml/__init__.py
diff --git a/v2/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
similarity index 100%
rename from v2/ansible/parsing/yaml/constructor.py
rename to lib/ansible/parsing/yaml/constructor.py
diff --git a/v2/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
similarity index 100%
rename from v2/ansible/parsing/yaml/loader.py
rename to lib/ansible/parsing/yaml/loader.py
diff --git a/v2/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
similarity index 100%
rename from v2/ansible/parsing/yaml/objects.py
rename to lib/ansible/parsing/yaml/objects.py
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 24ba2d3c6e0..40e6638f239 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -15,860 +15,71 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import ansible.inventory
-import ansible.constants as C
-import ansible.runner
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.callbacks
-import ansible.cache
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import os
-import shlex
-import collections
-from play import Play
-import StringIO
-import pipes
-# the setup cache stores all variables about a host
-# gathered during the setup step, while the vars cache
-# holds all other variables about a host
-SETUP_CACHE = ansible.cache.FactCache()
-VARS_CACHE = collections.defaultdict(dict)
-RESERVED_TAGS = ['all','tagged','untagged','always']
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.parsing import DataLoader
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.play import Play
+from ansible.playbook.playbook_include import PlaybookInclude
+from ansible.plugins import push_basedir
-class PlayBook(object):
- '''
- runs an ansible playbook, given as a datastructure or YAML filename.
- A playbook is a deployment, config management, or automation based
- set of commands to run in series.
+__all__ = ['Playbook']
- multiple plays/tasks do not execute simultaneously, but tasks in each
- pattern do execute in parallel (according to the number of forks
- requested) among the hosts they address
- '''
- # *****************************************************
+class Playbook:
- def __init__(self,
- playbook = None,
- host_list = C.DEFAULT_HOST_LIST,
- module_path = None,
- forks = C.DEFAULT_FORKS,
- timeout = C.DEFAULT_TIMEOUT,
- remote_user = C.DEFAULT_REMOTE_USER,
- remote_pass = C.DEFAULT_REMOTE_PASS,
- remote_port = None,
- transport = C.DEFAULT_TRANSPORT,
- private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
- callbacks = None,
- runner_callbacks = None,
- stats = None,
- extra_vars = None,
- only_tags = None,
- skip_tags = None,
- subset = C.DEFAULT_SUBSET,
- inventory = None,
- check = False,
- diff = False,
- any_errors_fatal = False,
- vault_password = False,
- force_handlers = False,
- # privilege escalation
- become = C.DEFAULT_BECOME,
- become_method = C.DEFAULT_BECOME_METHOD,
- become_user = C.DEFAULT_BECOME_USER,
- become_pass = None,
- ):
+ def __init__(self, loader):
+        # Entries in the data structure of a playbook may
+ # be either a play or an include statement
+ self._entries = []
+ self._basedir = os.getcwd()
+ self._loader = loader
- """
- playbook: path to a playbook file
- host_list: path to a file like /etc/ansible/hosts
- module_path: path to ansible modules, like /usr/share/ansible/
- forks: desired level of parallelism
- timeout: connection timeout
- remote_user: run as this user if not specified in a particular play
- remote_pass: use this remote password (for all plays) vs using SSH keys
- remote_port: default remote port to use if not specified with the host or play
- transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
- callbacks output callbacks for the playbook
- runner_callbacks: more callbacks, this time for the runner API
- stats: holds aggregrate data about events occurring to each host
- inventory: can be specified instead of host_list to use a pre-existing inventory object
- check: don't change anything, just try to detect some potential changes
- any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
- force_handlers: continue to notify and run handlers even if a task fails
- """
+ @staticmethod
+ def load(file_name, variable_manager=None, loader=None):
+ pb = Playbook(loader=loader)
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+ return pb
- self.SETUP_CACHE = SETUP_CACHE
- self.VARS_CACHE = VARS_CACHE
+ def _load_playbook_data(self, file_name, variable_manager):
- arguments = []
- if playbook is None:
- arguments.append('playbook')
- if callbacks is None:
- arguments.append('callbacks')
- if runner_callbacks is None:
- arguments.append('runner_callbacks')
- if stats is None:
- arguments.append('stats')
- if arguments:
- raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
-
- if extra_vars is None:
- extra_vars = {}
- if only_tags is None:
- only_tags = [ 'all' ]
- if skip_tags is None:
- skip_tags = []
-
- self.check = check
- self.diff = diff
- self.module_path = module_path
- self.forks = forks
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.transport = transport
- self.callbacks = callbacks
- self.runner_callbacks = runner_callbacks
- self.stats = stats
- self.extra_vars = extra_vars
- self.global_vars = {}
- self.private_key_file = private_key_file
- self.only_tags = only_tags
- self.skip_tags = skip_tags
- self.any_errors_fatal = any_errors_fatal
- self.vault_password = vault_password
- self.force_handlers = force_handlers
-
- self.become = become
- self.become_method = become_method
- self.become_user = become_user
- self.become_pass = become_pass
-
- self.callbacks.playbook = self
- self.runner_callbacks.playbook = self
-
- if inventory is None:
- self.inventory = ansible.inventory.Inventory(host_list)
- self.inventory.subset(subset)
+ if os.path.isabs(file_name):
+ self._basedir = os.path.dirname(file_name)
else:
- self.inventory = inventory
+ self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
- if self.module_path is not None:
- utils.plugins.module_finder.add_directory(self.module_path)
+ # set the loaders basedir
+ self._loader.set_basedir(self._basedir)
- self.basedir = os.path.dirname(playbook) or '.'
- utils.plugins.push_basedir(self.basedir)
+ # also add the basedir to the list of module directories
+ push_basedir(self._basedir)
- # let inventory know the playbook basedir so it can load more vars
- self.inventory.set_playbook_basedir(self.basedir)
+ ds = self._loader.load_from_file(os.path.basename(file_name))
+ if not isinstance(ds, list):
+ raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
- vars = extra_vars.copy()
- vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.inventory.basedir() is not None:
- vars['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- vars['inventory_file'] = self.inventory.src()
-
- self.filename = playbook
- (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
- ansible.callbacks.load_callback_plugins()
- ansible.callbacks.set_playbook(self.callbacks, self)
-
- self._ansible_version = utils.version_info(gitinfo=True)
-
- # *****************************************************
-
- def _get_playbook_vars(self, play_ds, existing_vars):
- '''
- Gets the vars specified with the play and blends them
- with any existing vars that have already been read in
- '''
- new_vars = existing_vars.copy()
- if 'vars' in play_ds:
- if isinstance(play_ds['vars'], dict):
- new_vars.update(play_ds['vars'])
- elif isinstance(play_ds['vars'], list):
- for v in play_ds['vars']:
- new_vars.update(v)
- return new_vars
-
- # *****************************************************
-
- def _get_include_info(self, play_ds, basedir, existing_vars={}):
- '''
- Gets any key=value pairs specified with the included file
- name and returns the merged vars along with the path
- '''
- new_vars = existing_vars.copy()
- tokens = split_args(play_ds.get('include', ''))
- for t in tokens[1:]:
- try:
- (k,v) = unquote(t).split("=", 1)
- new_vars[k] = template(basedir, v, new_vars)
- except ValueError, e:
- raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
-
- return (new_vars, unquote(tokens[0]))
-
- # *****************************************************
-
- def _get_playbook_vars_files(self, play_ds, existing_vars_files):
- new_vars_files = list(existing_vars_files)
- if 'vars_files' in play_ds:
- new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
- return new_vars_files
-
- # *****************************************************
-
- def _extend_play_vars(self, play, vars={}):
- '''
- Extends the given play's variables with the additional specified vars.
- '''
-
- if 'vars' not in play or not play['vars']:
- # someone left out or put an empty "vars:" entry in their playbook
- return vars.copy()
-
- play_vars = None
- if isinstance(play['vars'], dict):
- play_vars = play['vars'].copy()
- play_vars.update(vars)
- elif isinstance(play['vars'], list):
- # nobody should really do this, but handle vars: a=1 b=2
- play_vars = play['vars'][:]
- play_vars.extend([{k:v} for k,v in vars.iteritems()])
-
- return play_vars
-
- # *****************************************************
-
- def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
- '''
- run top level error checking on playbooks and allow them to include other playbooks.
- '''
-
- playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
- accumulated_plays = []
- play_basedirs = []
-
- if type(playbook_data) != list:
- raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
-
- basedir = os.path.dirname(path) or '.'
- utils.plugins.push_basedir(basedir)
- for play in playbook_data:
- if type(play) != dict:
- raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
-
- if 'include' in play:
- # a playbook (list of plays) decided to include some other list of plays
- # from another file. The result is a flat list of plays in the end.
-
- play_vars = self._get_playbook_vars(play, vars)
- play_vars_files = self._get_playbook_vars_files(play, vars_files)
- inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
- play_vars.update(inc_vars)
-
- included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
- (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
- for p in plays:
- # support for parameterized play includes works by passing
- # those variables along to the subservient play
- p['vars'] = self._extend_play_vars(p, play_vars)
- # now add in the vars_files
- p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
-
- accumulated_plays.extend(plays)
- play_basedirs.extend(basedirs)
+ # Parse the playbook entries. For plays, we simply parse them
+ # using the Play() object, and includes are parsed using the
+ # PlaybookInclude() object
+ for entry in ds:
+ if not isinstance(entry, dict):
+ raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
+ if 'include' in entry:
+ pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
+ self._entries.extend(pb._entries)
else:
+ entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
+ self._entries.append(entry_obj)
- # this is a normal (non-included play)
- accumulated_plays.append(play)
- play_basedirs.append(basedir)
+ def get_loader(self):
+ return self._loader
- return (accumulated_plays, play_basedirs)
-
- # *****************************************************
-
- def run(self):
- ''' run all patterns in the playbook '''
- plays = []
- matched_tags_all = set()
- unmatched_tags_all = set()
-
- # loop through all patterns and run them
- self.callbacks.on_start()
- for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
- assert play is not None
-
- matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
-
- matched_tags_all = matched_tags_all | matched_tags
- unmatched_tags_all = unmatched_tags_all | unmatched_tags
-
- # Remove tasks we wish to skip
- matched_tags = matched_tags - set(self.skip_tags)
-
- # if we have matched_tags, the play must be run.
- # if the play contains no tasks, assume we just want to gather facts
- # in this case there are actually 3 meta tasks (handler flushes) not 0
- # tasks, so that's why there's a check against 3
- if (len(matched_tags) > 0 or len(play.tasks()) == 3):
- plays.append(play)
-
- # if the playbook is invoked with --tags or --skip-tags that don't
- # exist at all in the playbooks then we need to raise an error so that
- # the user can correct the arguments.
- unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
- (matched_tags_all | unmatched_tags_all))
-
- for t in RESERVED_TAGS:
- unknown_tags.discard(t)
-
- if len(unknown_tags) > 0:
- for t in RESERVED_TAGS:
- unmatched_tags_all.discard(t)
- msg = 'tag(s) not found in playbook: %s. possible values: %s'
- unknown = ','.join(sorted(unknown_tags))
- unmatched = ','.join(sorted(unmatched_tags_all))
- raise errors.AnsibleError(msg % (unknown, unmatched))
-
- for play in plays:
- ansible.callbacks.set_play(self.callbacks, play)
- ansible.callbacks.set_play(self.runner_callbacks, play)
- if not self._run_play(play):
- break
-
- ansible.callbacks.set_play(self.callbacks, None)
- ansible.callbacks.set_play(self.runner_callbacks, None)
-
- # summarize the results
- results = {}
- for host in self.stats.processed.keys():
- results[host] = self.stats.summarize(host)
- return results
-
- # *****************************************************
-
- def _async_poll(self, poller, async_seconds, async_poll_interval):
- ''' launch an async job, if poll_interval is set, wait for completion '''
-
- results = poller.wait(async_seconds, async_poll_interval)
-
- # mark any hosts that are still listed as started as failed
- # since these likely got killed by async_wrapper
- for host in poller.hosts_to_poll:
- reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
- self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
- results['contacted'][host] = reason
-
- return results
-
- # *****************************************************
-
- def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
- ''' returns a list of hosts that haven't failed and aren't dark '''
-
- return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
-
- # *****************************************************
-
- def _run_task_internal(self, task, include_failed=False):
- ''' run a particular module step in a playbook '''
-
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
- self.inventory.restrict_to(hosts)
-
- runner = ansible.runner.Runner(
- pattern=task.play.hosts,
- inventory=self.inventory,
- module_name=task.module_name,
- module_args=task.module_args,
- forks=self.forks,
- remote_pass=self.remote_pass,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=task.remote_user,
- remote_port=task.play.remote_port,
- module_vars=task.module_vars,
- play_vars=task.play_vars,
- play_file_vars=task.play_file_vars,
- role_vars=task.role_vars,
- role_params=task.role_params,
- default_vars=task.default_vars,
- extra_vars=self.extra_vars,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- basedir=task.play.basedir,
- conditional=task.when,
- callbacks=self.runner_callbacks,
- transport=task.transport,
- is_playbook=True,
- check=self.check,
- diff=self.diff,
- environment=task.environment,
- complex_args=task.args,
- accelerate=task.play.accelerate,
- accelerate_port=task.play.accelerate_port,
- accelerate_ipv6=task.play.accelerate_ipv6,
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- vault_pass = self.vault_password,
- run_hosts=hosts,
- no_log=task.no_log,
- run_once=task.run_once,
- become=task.become,
- become_method=task.become_method,
- become_user=task.become_user,
- become_pass=task.become_pass,
- )
-
- runner.module_vars.update({'play_hosts': hosts})
- runner.module_vars.update({'ansible_version': self._ansible_version})
-
- if task.async_seconds == 0:
- results = runner.run()
- else:
- results, poller = runner.run_async(task.async_seconds)
- self.stats.compute(results)
- if task.async_poll_interval > 0:
- # if not polling, playbook requested fire and forget, so don't poll
- results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
- else:
- for (host, res) in results.get('contacted', {}).iteritems():
- self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
-
- contacted = results.get('contacted',{})
- dark = results.get('dark', {})
-
- self.inventory.lift_restriction()
-
- if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
- return None
-
- return results
-
- # *****************************************************
-
- def _run_task(self, play, task, is_handler):
- ''' run a single task in the playbook and recursively run any subtasks. '''
-
- ansible.callbacks.set_task(self.callbacks, task)
- ansible.callbacks.set_task(self.runner_callbacks, task)
-
- if task.role_name:
- name = '%s | %s' % (task.role_name, task.name)
- else:
- name = task.name
-
- try:
- # v1 HACK: we don't have enough information to template many names
- # at this point. Rather than making this work for all cases in
- # v1, just make this degrade gracefully. Will fix in v2
- name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
- except:
- pass
-
- self.callbacks.on_task_start(name, is_handler)
- if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return True
-
- # template ignore_errors
- # TODO: Is this needed here? cond is templated again in
- # check_conditional after some more manipulations.
- # TODO: we don't have enough information here to template cond either
- # (see note on templating name above)
- cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
- task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
-
- # load up an appropriate ansible runner to run the task in parallel
- include_failed = is_handler and play.force_handlers
- results = self._run_task_internal(task, include_failed=include_failed)
-
- # if no hosts are matched, carry on
- hosts_remaining = True
- if results is None:
- hosts_remaining = False
- results = {}
-
- contacted = results.get('contacted', {})
- self.stats.compute(results, ignore_errors=task.ignore_errors)
-
- def _register_play_vars(host, result):
- # when 'register' is used, persist the result in the vars cache
- # rather than the setup cache - vars should be transient between
- # playbook executions
- if 'stdout' in result and 'stdout_lines' not in result:
- result['stdout_lines'] = result['stdout'].splitlines()
- utils.update_hash(self.VARS_CACHE, host, {task.register: result})
-
- def _save_play_facts(host, facts):
- # saves play facts in SETUP_CACHE, unless the module executed was
- # set_fact, in which case we add them to the VARS_CACHE
- if task.module_name in ('set_fact', 'include_vars'):
- utils.update_hash(self.VARS_CACHE, host, facts)
- else:
- utils.update_hash(self.SETUP_CACHE, host, facts)
-
- # add facts to the global setup cache
- for host, result in contacted.iteritems():
- if 'results' in result:
- # task ran with_ lookup plugin, so facts are encapsulated in
- # multiple list items in the results key
- for res in result['results']:
- if type(res) == dict:
- facts = res.get('ansible_facts', {})
- _save_play_facts(host, facts)
- else:
- # when facts are returned, persist them in the setup cache
- facts = result.get('ansible_facts', {})
- _save_play_facts(host, facts)
-
- # if requested, save the result into the registered variable name
- if task.register:
- _register_play_vars(host, result)
-
- # also have to register some failed, but ignored, tasks
- if task.ignore_errors and task.register:
- failed = results.get('failed', {})
- for host, result in failed.iteritems():
- _register_play_vars(host, result)
-
- # flag which notify handlers need to be run
- if len(task.notify) > 0:
- for host, results in results.get('contacted',{}).iteritems():
- if results.get('changed', False):
- for handler_name in task.notify:
- self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return hosts_remaining
-
- # *****************************************************
-
- def _flag_handler(self, play, handler_name, host):
- '''
- if a task has any notify elements, flag handlers for run
- at end of execution cycle for hosts that have indicated
- changes have been made
- '''
-
- found = False
- for x in play.handlers():
- if handler_name == template(play.basedir, x.name, x.module_vars):
- found = True
- self.callbacks.on_notify(host, x.name)
- x.notified_by.append(host)
- if not found:
- raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
-
- # *****************************************************
-
- def _do_setup_step(self, play):
- ''' get facts from the remote system '''
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
- host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
- if len(host_list) == 0:
- return {}
- elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
- return {}
-
- self.callbacks.on_setup()
- self.inventory.restrict_to(host_list)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
-
- # push any variables down to the system
- setup_results = ansible.runner.Runner(
- basedir=self.basedir,
- pattern=play.hosts,
- module_name='setup',
- module_args={},
- inventory=self.inventory,
- forks=self.forks,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=play.remote_user,
- remote_pass=self.remote_pass,
- remote_port=play.remote_port,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- callbacks=self.runner_callbacks,
- become=play.become,
- become_method=play.become_method,
- become_user=play.become_user,
- become_pass=self.become_pass,
- vault_pass=self.vault_password,
- transport=play.transport,
- is_playbook=True,
- module_vars=play.vars,
- play_vars=play.vars,
- play_file_vars=play.vars_file_vars,
- role_vars=play.role_vars,
- default_vars=play.default_vars,
- check=self.check,
- diff=self.diff,
- accelerate=play.accelerate,
- accelerate_port=play.accelerate_port,
- ).run()
- self.stats.compute(setup_results, setup=True)
-
- self.inventory.lift_restriction()
-
- # now for each result, load into the setup cache so we can
- # let runner template out future commands
- setup_ok = setup_results.get('contacted', {})
- for (host, result) in setup_ok.iteritems():
- utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
- utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
- return setup_results
-
- # *****************************************************
-
-
- def generate_retry_inventory(self, replay_hosts):
- '''
- called by /usr/bin/ansible when a playbook run fails. It generates an inventory
- that allows re-running on ONLY the failed hosts. This may duplicate some
- variable information in group_vars/host_vars but that is ok, and expected.
- '''
-
- buf = StringIO.StringIO()
- for x in replay_hosts:
- buf.write("%s\n" % x)
- basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
- filename = "%s.retry" % os.path.basename(self.filename)
- filename = filename.replace(".yml","")
- filename = os.path.join(basedir, filename)
-
- try:
- if not os.path.exists(basedir):
- os.makedirs(basedir)
-
- fd = open(filename, 'w')
- fd.write(buf.getvalue())
- fd.close()
- except:
- ansible.callbacks.display(
- "\nERROR: could not create retry file. Check the value of \n"
- + "the configuration variable 'retry_files_save_path' or set \n"
- + "'retry_files_enabled' to False to avoid this message.\n",
- color='red'
- )
- return None
-
- return filename
-
- # *****************************************************
- def tasks_to_run_in_play(self, play):
-
- tasks = []
-
- for task in play.tasks():
- # only run the task if the requested tags match or has 'always' tag
- u = set(['untagged'])
- task_set = set(task.tags)
-
- if 'always' in task.tags:
- should_run = True
- else:
- if 'all' in self.only_tags:
- should_run = True
- else:
- should_run = False
- if 'tagged' in self.only_tags:
- if task_set != u:
- should_run = True
- elif 'untagged' in self.only_tags:
- if task_set == u:
- should_run = True
- else:
- if task_set.intersection(self.only_tags):
- should_run = True
-
- # Check for tags that we need to skip
- if 'all' in self.skip_tags:
- should_run = False
- else:
- if 'tagged' in self.skip_tags:
- if task_set != u:
- should_run = False
- elif 'untagged' in self.skip_tags:
- if task_set == u:
- should_run = False
- else:
- if should_run:
- if task_set.intersection(self.skip_tags):
- should_run = False
-
- if should_run:
- tasks.append(task)
-
- return tasks
-
- # *****************************************************
- def _run_play(self, play):
- ''' run a list of tasks for a given pattern, in order '''
-
- self.callbacks.on_play_start(play.name)
- # Get the hosts for this play
- play._play_hosts = self.inventory.list_hosts(play.hosts)
- # if no hosts matches this play, drop out
- if not play._play_hosts:
- self.callbacks.on_no_hosts_matched()
- return True
-
- # get facts from system
- self._do_setup_step(play)
-
- # now with that data, handle contentional variable file imports!
- all_hosts = self._trim_unavailable_hosts(play._play_hosts)
- play.update_vars_files(all_hosts, vault_password=self.vault_password)
- hosts_count = len(all_hosts)
-
- if play.serial.endswith("%"):
-
- # This is a percentage, so calculate it based on the
- # number of hosts
- serial_pct = int(play.serial.replace("%",""))
- serial = int((serial_pct/100.0) * len(all_hosts))
-
- # Ensure that no matter how small the percentage, serial
- # can never fall below 1, so that things actually happen
- serial = max(serial, 1)
- else:
- serial = int(play.serial)
-
- serialized_batch = []
- if serial <= 0:
- serialized_batch = [all_hosts]
- else:
- # do N forks all the way through before moving to next
- while len(all_hosts) > 0:
- play_hosts = []
- for x in range(serial):
- if len(all_hosts) > 0:
- play_hosts.append(all_hosts.pop(0))
- serialized_batch.append(play_hosts)
-
- task_errors = False
- for on_hosts in serialized_batch:
-
- # restrict the play to just the hosts we have in our on_hosts block that are
- # available.
- play._play_hosts = self._trim_unavailable_hosts(on_hosts)
- self.inventory.also_restrict_to(on_hosts)
-
- for task in self.tasks_to_run_in_play(play):
-
- if task.meta is not None:
- # meta tasks can force handlers to run mid-play
- if task.meta == 'flush_handlers':
- self.run_handlers(play)
-
- # skip calling the handler till the play is finished
- continue
-
- if not self._run_task(play, task, False):
- # whether no hosts matched is fatal or not depends if it was on the initial step.
- # if we got exactly no hosts on the first step (setup!) then the host group
- # just didn't match anything and that's ok
- return False
-
- # Get a new list of what hosts are left as available, the ones that
- # did not go fail/dark during the task
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- # Set max_fail_pct to 0, So if any hosts fails, bail out
- if task.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
-
- # If threshold for max nodes failed is exceeded, bail out.
- if play.serial > 0:
- # if serial is set, we need to shorten the size of host_count
- play_count = len(play._play_hosts)
- if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
- host_list = None
- else:
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
-
- # if no hosts remain, drop out
- if not host_list:
- if play.force_handlers:
- task_errors = True
- break
- else:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- # lift restrictions after each play finishes
- self.inventory.lift_also_restriction()
-
- if task_errors and not play.force_handlers:
- # if there were failed tasks and handler execution
- # is not forced, quit the play with an error
- return False
- else:
- # no errors, go ahead and execute all handlers
- if not self.run_handlers(play):
- return False
-
- return True
-
-
- def run_handlers(self, play):
- on_hosts = play._play_hosts
- hosts_count = len(on_hosts)
- for task in play.tasks():
- if task.meta is not None:
-
- fired_names = {}
- for handler in play.handlers():
- if len(handler.notified_by) > 0:
- self.inventory.restrict_to(handler.notified_by)
-
- # Resolve the variables first
- handler_name = template(play.basedir, handler.name, handler.module_vars)
- if handler_name not in fired_names:
- self._run_task(play, handler, True)
- # prevent duplicate handler includes from running more than once
- fired_names[handler_name] = 1
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
- if handler.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
- if not host_list and not play.force_handlers:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- self.inventory.lift_restriction()
- new_list = handler.notified_by[:]
- for host in handler.notified_by:
- if host in on_hosts:
- while host in new_list:
- new_list.remove(host)
- handler.notified_by = new_list
-
- continue
-
- return True
+ def get_plays(self):
+ return self._entries[:]
diff --git a/v2/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
similarity index 100%
rename from v2/ansible/playbook/attribute.py
rename to lib/ansible/playbook/attribute.py
diff --git a/v2/ansible/playbook/base.py b/lib/ansible/playbook/base.py
similarity index 100%
rename from v2/ansible/playbook/base.py
rename to lib/ansible/playbook/base.py
diff --git a/v2/ansible/playbook/become.py b/lib/ansible/playbook/become.py
similarity index 100%
rename from v2/ansible/playbook/become.py
rename to lib/ansible/playbook/become.py
diff --git a/v2/ansible/playbook/block.py b/lib/ansible/playbook/block.py
similarity index 100%
rename from v2/ansible/playbook/block.py
rename to lib/ansible/playbook/block.py
diff --git a/v2/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
similarity index 100%
rename from v2/ansible/playbook/conditional.py
rename to lib/ansible/playbook/conditional.py
diff --git a/v2/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py
similarity index 100%
rename from v2/ansible/playbook/handler.py
rename to lib/ansible/playbook/handler.py
diff --git a/v2/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
similarity index 100%
rename from v2/ansible/playbook/helpers.py
rename to lib/ansible/playbook/helpers.py
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 6ee85e0bf48..b99c01fdf74 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -15,935 +15,249 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-#############################################
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.playbook.task import Task
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.constants as C
-import pipes
-import shlex
-import os
-import sys
-import uuid
+from ansible.errors import AnsibleError, AnsibleParserError
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.playbook.block import Block
+
+from ansible.utils.vars import combine_vars
-class Play(object):
+__all__ = ['Play']
- _pb_common = [
- 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
- 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
- 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
- 'vault_password',
- ]
- __slots__ = _pb_common + [
- '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
- 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
- 'role_vars', 'transport', 'vars_file_vars',
- ]
+class Play(Base, Taggable, Become):
- # to catch typos and so forth -- these are userland names
- # and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset(_pb_common + [
- 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
- 'pre_tasks', 'role_names', 'tasks', 'user',
- ])
+ """
+ A play is a language feature that represents a list of roles and/or
+ task/handler blocks to execute on a given set of hosts.
- # *************************************************
+ Usage:
- def __init__(self, playbook, ds, basedir, vault_password=None):
- ''' constructor loads from a play datastructure '''
+ Play.load(datastructure) -> Play
+ Play.something(...)
+ """
- for x in ds.keys():
- if not x in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
+ # =================================================================================
+ # Connection-Related Attributes
- # allow all playbook keys to be set by --extra-vars
- self.vars = ds.get('vars', {})
- self.vars_prompt = ds.get('vars_prompt', {})
- self.playbook = playbook
- self.vars = self._get_vars()
- self.vars_file_vars = dict() # these are vars read in from vars_files:
- self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
- self.basedir = basedir
- self.roles = ds.get('roles', None)
- self.tags = ds.get('tags', None)
- self.vault_password = vault_password
- self.environment = ds.get('environment', {})
+ # TODO: generalize connection
+ _accelerate = FieldAttribute(isa='bool', default=False)
+ _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
+ _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
- if self.tags is None:
- self.tags = []
- elif type(self.tags) in [ str, unicode ]:
- self.tags = self.tags.split(",")
- elif type(self.tags) != list:
- self.tags = []
+ # Connection
+ _gather_facts = FieldAttribute(isa='string', default='smart')
+ _hosts = FieldAttribute(isa='list', default=[], required=True)
+ _name = FieldAttribute(isa='string', default='')
- # make sure we have some special internal variables set, which
- # we use later when loading tasks and handlers
- load_vars = dict()
- load_vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.playbook.inventory.basedir() is not None:
- load_vars['inventory_dir'] = self.playbook.inventory.basedir()
- if self.playbook.inventory.src() is not None:
- load_vars['inventory_file'] = self.playbook.inventory.src()
+ # Variable Attributes
+ _vars_files = FieldAttribute(isa='list', default=[])
+ _vars_prompt = FieldAttribute(isa='dict', default=dict())
+ _vault_password = FieldAttribute(isa='string')
- # We first load the vars files from the datastructure
- # so we have the default variables to pass into the roles
- self.vars_files = ds.get('vars_files', [])
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
- processed_vars_files = self._update_vars_files_for_host(None)
+ # Block (Task) Lists Attributes
+ _handlers = FieldAttribute(isa='list', default=[])
+ _pre_tasks = FieldAttribute(isa='list', default=[])
+ _post_tasks = FieldAttribute(isa='list', default=[])
+ _tasks = FieldAttribute(isa='list', default=[])
- # now we load the roles into the datastructure
- self.included_roles = []
- ds = self._load_roles(self.roles, ds)
+ # Role Attributes
+ _roles = FieldAttribute(isa='list', default=[])
- # and finally re-process the vars files as they may have been updated
- # by the included roles, but exclude any which have been processed
- self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
+ # Flag/Setting Attributes
+ _any_errors_fatal = FieldAttribute(isa='bool', default=False)
+ _max_fail_percentage = FieldAttribute(isa='string', default='0')
+ _serial = FieldAttribute(isa='int', default=0)
+ _strategy = FieldAttribute(isa='string', default='linear')
- self._update_vars_files_for_host(None)
+ # =================================================================================
- # template everything to be efficient, but do not pre-mature template
- # tasks/handlers as they may have inventory scope overrides. We also
- # create a set of temporary variables for templating, so we don't
- # trample on the existing vars structures
- _tasks = ds.pop('tasks', [])
- _handlers = ds.pop('handlers', [])
+ def __init__(self):
+ super(Play, self).__init__()
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self):
+ ''' return the name of the Play '''
+ return "PLAY: %s" % self._attributes.get('name')
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ p = Play()
+ return p.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def preprocess_data(self, ds):
+ '''
+        Adjusts play datastructure to clean up old/legacy items
+ '''
+
+ assert isinstance(ds, dict)
+
+ # The use of 'user' in the Play datastructure was deprecated to
+ # line up with the same change for Tasks, due to the fact that
+ # 'user' conflicted with the user module.
+ if 'user' in ds:
+ # this should never happen, but error out with a helpful message
+ # to the user if it does...
+ if 'remote_user' in ds:
+ raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
+
+ ds['remote_user'] = ds['user']
+ del ds['user']
+
+ return super(Play, self).preprocess_data(ds)
+
+ def _load_vars(self, attr, ds):
+ '''
+ Vars in a play can be specified either as a dictionary directly, or
+        as a list of dictionaries. If the latter, this method will turn the
+ list into a single dictionary.
+ '''
try:
- ds = template(basedir, ds, temp_vars)
- except errors.AnsibleError, e:
- utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
+ if isinstance(ds, dict):
+ return ds
+ elif isinstance(ds, list):
+ all_vars = dict()
+ for item in ds:
+ if not isinstance(item, dict):
+ raise ValueError
+ all_vars = combine_vars(all_vars, item)
+ return all_vars
+ else:
+ raise ValueError
+ except ValueError:
+ raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
- ds['tasks'] = _tasks
- ds['handlers'] = _handlers
+ def _load_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- self._ds = ds
+ def _load_pre_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- hosts = ds.get('hosts')
- if hosts is None:
- raise errors.AnsibleError('hosts declaration is required')
- elif isinstance(hosts, list):
- try:
- hosts = ';'.join(hosts)
- except TypeError,e:
- raise errors.AnsibleError('improper host declaration: %s' % str(e))
+ def _load_post_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
- self.serial = str(ds.get('serial', 0))
- self.hosts = hosts
- self.name = ds.get('name', self.hosts)
- self._tasks = ds.get('tasks', [])
- self._handlers = ds.get('handlers', [])
- self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
- self.remote_port = ds.get('port', self.playbook.remote_port)
- self.transport = ds.get('connection', self.playbook.transport)
- self.remote_port = self.remote_port
- self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
- self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
- self.accelerate_port = ds.get('accelerate_port', None)
- self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
- self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.no_log = utils.boolean(ds.get('no_log', 'false'))
- self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
+ def _load_handlers(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed handlers/blocks.
+ Bare handlers outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
- # Fail out if user specifies conflicting privilege escalations
- if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
- if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+ def _load_roles(self, attr, ds):
+ '''
+ Loads and returns a list of RoleInclude objects from the datastructure
+ list of role definitions and creates the Role from those objects
+ '''
- # become settings are inherited and updated normally
- self.become = ds.get('become', self.playbook.become)
- self.become_method = ds.get('become_method', self.playbook.become_method)
- self.become_user = ds.get('become_user', self.playbook.become_user)
+ role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)
- # Make sure current play settings are reflected in become fields
- if 'sudo' in ds:
- self.become=ds['sudo']
- self.become_method='sudo'
- if 'sudo_user' in ds:
- self.become_user=ds['sudo_user']
- elif 'su' in ds:
- self.become=True
- self.become=ds['su']
- self.become_method='su'
- if 'su_user' in ds:
- self.become_user=ds['su_user']
+ roles = []
+ for ri in role_includes:
+ roles.append(Role.load(ri))
+ return roles
- # gather_facts is not a simple boolean, as None means that a 'smart'
- # fact gathering mode will be used, so we need to be careful here as
- # calling utils.boolean(None) returns False
- self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts is not None:
- self.gather_facts = utils.boolean(self.gather_facts)
+ # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
- load_vars['role_names'] = ds.get('role_names', [])
+ def _compile_roles(self):
+ '''
+ Handles the role compilation step, returning a flat list of tasks
+ with the lowest level dependencies first. For example, if a role R
+ has a dependency D1, which also has a dependency D2, the tasks from
+ D2 are merged first, followed by D1, and lastly by the tasks from
+        the parent role R. This is done for all roles in the Play.
+ '''
- self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
- self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
+ block_list = []
- # apply any missing tags to role tasks
- self._late_merge_role_tags()
+ if len(self.roles) > 0:
+ for r in self.roles:
+ block_list.extend(r.compile(play=self))
- # place holder for the discovered hosts to be used in this play
- self._play_hosts = None
+ return block_list
- # *************************************************
+ def compile(self):
+ '''
+ Compiles and returns the task list for this play, compiled from the
+ roles (which are themselves compiled recursively) and/or the list of
+ tasks specified in the play.
+ '''
- def _get_role_path(self, role):
- """
- Returns the path on disk to the directory containing
- the role directories like tasks, templates, etc. Also
- returns any variables that were included with the role
- """
- orig_path = template(self.basedir,role,self.vars)
+ block_list = []
- role_vars = {}
- if type(orig_path) == dict:
- # what, not a path?
- role_name = orig_path.get('role', None)
- if role_name is None:
- raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
- role_vars = orig_path
- else:
- role_name = utils.role_spec_parse(orig_path)["name"]
+ block_list.extend(self.pre_tasks)
+ block_list.extend(self._compile_roles())
+ block_list.extend(self.tasks)
+ block_list.extend(self.post_tasks)
- role_path = None
+ return block_list
- possible_paths = [
- utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
- utils.path_dwim(self.basedir, role_name)
- ]
+ def get_vars(self):
+ return self.vars.copy()
- if C.DEFAULT_ROLES_PATH:
- search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- for loc in search_locations:
- loc = os.path.expanduser(loc)
- possible_paths.append(utils.path_dwim(loc, role_name))
+ def get_vars_files(self):
+ return self.vars_files
- for path_option in possible_paths:
- if os.path.isdir(path_option):
- role_path = path_option
- break
+ def get_handlers(self):
+ return self.handlers[:]
- if role_path is None:
- raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
+ def get_roles(self):
+ return self.roles[:]
- return (role_path, role_vars)
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
+ else:
+ tasklist.append(task)
+ return tasklist
- def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
- # this number is arbitrary, but it seems sane
- if level > 20:
- raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
- for role in roles:
- role_path,role_vars = self._get_role_path(role)
+ def serialize(self):
+ data = super(Play, self).serialize()
- # save just the role params for this role, which exclude the special
- # keywords 'role', 'tags', and 'when'.
- role_params = role_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in role_params:
- del role_params[item]
+ roles = []
+ for role in self.get_roles():
+ roles.append(role.serialize())
+ data['roles'] = roles
- role_vars = utils.combine_vars(passed_vars, role_vars)
+ return data
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- if not isinstance(vars_data, dict):
- raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
- role_vars = utils.combine_vars(vars_data, role_vars)
+ def deserialize(self, data):
+ super(Play, self).deserialize(data)
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
- defaults_data = {}
- if os.path.isfile(defaults):
- defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
-
- # the meta directory contains the yaml that should
- # hold the list of dependencies (if any)
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
- if os.path.isfile(meta):
- data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if data:
- dependencies = data.get('dependencies',[])
- if dependencies is None:
- dependencies = []
- for dep in dependencies:
- allow_dupes = False
- (dep_path,dep_vars) = self._get_role_path(dep)
-
- # save the dep params, just as we did above
- dep_params = dep_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in dep_params:
- del dep_params[item]
-
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
- if os.path.isfile(meta):
- meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if meta_data:
- allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
-
- # if any tags were specified as role/dep variables, merge
- # them into the current dep_vars so they're passed on to any
- # further dependencies too, and so we only have one place
- # (dep_vars) to look for tags going forward
- def __merge_tags(var_obj):
- old_tags = dep_vars.get('tags', [])
- if isinstance(old_tags, basestring):
- old_tags = [old_tags, ]
- if isinstance(var_obj, dict):
- new_tags = var_obj.get('tags', [])
- if isinstance(new_tags, basestring):
- new_tags = [new_tags, ]
- else:
- new_tags = []
- return list(set(old_tags).union(set(new_tags)))
-
- dep_vars['tags'] = __merge_tags(role_vars)
- dep_vars['tags'] = __merge_tags(passed_vars)
-
- # if tags are set from this role, merge them
- # into the tags list for the dependent role
- if "tags" in passed_vars:
- for included_role_dep in dep_stack:
- included_dep_name = included_role_dep[0]
- included_dep_vars = included_role_dep[2]
- if included_dep_name == dep:
- if "tags" in included_dep_vars:
- included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
- else:
- included_dep_vars["tags"] = passed_vars["tags"][:]
-
- dep_vars = utils.combine_vars(passed_vars, dep_vars)
- dep_vars = utils.combine_vars(role_vars, dep_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- dep_vars = utils.combine_vars(dep_vars, vars_data)
- pass
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
- dep_defaults_data = {}
- if os.path.isfile(defaults):
- dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
- if 'role' in dep_vars:
- del dep_vars['role']
-
- if not allow_dupes:
- if dep in self.included_roles:
- # skip back to the top, since we don't want to
- # do anything else with this role
- continue
- else:
- self.included_roles.append(dep)
-
- def _merge_conditional(cur_conditionals, new_conditionals):
- if isinstance(new_conditionals, (basestring, bool)):
- cur_conditionals.append(new_conditionals)
- elif isinstance(new_conditionals, list):
- cur_conditionals.extend(new_conditionals)
-
- # pass along conditionals from roles to dep roles
- passed_when = passed_vars.get('when')
- role_when = role_vars.get('when')
- dep_when = dep_vars.get('when')
-
- tmpcond = []
- _merge_conditional(tmpcond, passed_when)
- _merge_conditional(tmpcond, role_when)
- _merge_conditional(tmpcond, dep_when)
-
- if len(tmpcond) > 0:
- dep_vars['when'] = tmpcond
-
- self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
- dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
-
- # only add the current role when we're at the top level,
- # otherwise we'll end up in a recursive loop
- if level == 0:
- self.included_roles.append(role)
- dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
- return dep_stack
-
- def _load_role_vars_files(self, vars_files):
- # process variables stored in vars/main.yml files
- role_vars = {}
- for filename in vars_files:
- if os.path.exists(filename):
- new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_vars:
- if type(new_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
- role_vars = utils.combine_vars(role_vars, new_vars)
-
- return role_vars
-
- def _load_role_defaults(self, defaults_files):
- # process default variables
- default_vars = {}
- for filename in defaults_files:
- if os.path.exists(filename):
- new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_default_vars:
- if type(new_default_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
- default_vars = utils.combine_vars(default_vars, new_default_vars)
-
- return default_vars
-
- def _load_roles(self, roles, ds):
- # a role is a name that auto-includes the following if they exist
- # /tasks/main.yml
- # /handlers/main.yml
- # /vars/main.yml
- # /library
- # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
-
- if roles is None:
+ if 'roles' in data:
+ role_data = data.get('roles', [])
roles = []
- if type(roles) != list:
- raise errors.AnsibleError("value of 'roles:' must be a list")
+ for role in role_data:
+ r = Role()
+ r.deserialize(role)
+ roles.append(r)
- new_tasks = []
- new_handlers = []
- role_vars_files = []
- defaults_files = []
+ setattr(self, 'roles', roles)
+ del data['roles']
- pre_tasks = ds.get('pre_tasks', None)
- if type(pre_tasks) != list:
- pre_tasks = []
- for x in pre_tasks:
- new_tasks.append(x)
-
- # flush handlers after pre_tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- roles = self._build_role_dependencies(roles, [], {})
-
- # give each role an uuid and
- # make role_path available as variable to the task
- for idx, val in enumerate(roles):
- this_uuid = str(uuid.uuid4())
- roles[idx][-3]['role_uuid'] = this_uuid
- roles[idx][-3]['role_path'] = roles[idx][1]
-
- role_names = []
-
- for (role, role_path, role_vars, role_params, default_vars) in roles:
- # special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
- special_vars = {}
- for k in special_keys:
- if k in role_vars:
- special_vars[k] = role_vars[k]
-
- task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
- handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
- vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
- meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
- defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
-
- task = self._resolve_main(task_basepath)
- handler = self._resolve_main(handler_basepath)
- vars_file = self._resolve_main(vars_basepath)
- meta_file = self._resolve_main(meta_basepath)
- defaults_file = self._resolve_main(defaults_basepath)
-
- library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
-
- missing = lambda f: not os.path.isfile(f)
- if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
- raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
-
- if isinstance(role, dict):
- role_name = role['role']
- else:
- role_name = utils.role_spec_parse(role)["name"]
-
- role_names.append(role_name)
- if os.path.isfile(task):
- nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_tasks.append(nt)
- if os.path.isfile(handler):
- nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_handlers.append(nt)
- if os.path.isfile(vars_file):
- role_vars_files.append(vars_file)
- if os.path.isfile(defaults_file):
- defaults_files.append(defaults_file)
- if os.path.isdir(library):
- utils.plugins.module_finder.add_directory(library)
-
- tasks = ds.get('tasks', None)
- post_tasks = ds.get('post_tasks', None)
- handlers = ds.get('handlers', None)
- vars_files = ds.get('vars_files', None)
-
- if type(tasks) != list:
- tasks = []
- if type(handlers) != list:
- handlers = []
- if type(vars_files) != list:
- vars_files = []
- if type(post_tasks) != list:
- post_tasks = []
-
- new_tasks.extend(tasks)
- # flush handlers after tasks + role tasks
- new_tasks.append(dict(meta='flush_handlers'))
- new_tasks.extend(post_tasks)
- # flush handlers after post tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- new_handlers.extend(handlers)
-
- ds['tasks'] = new_tasks
- ds['handlers'] = new_handlers
- ds['role_names'] = role_names
-
- self.role_vars = self._load_role_vars_files(role_vars_files)
- self.default_vars = self._load_role_defaults(defaults_files)
-
- return ds
-
- # *************************************************
-
- def _resolve_main(self, basepath):
- ''' flexibly handle variations in main filenames '''
- # these filenames are acceptable:
- mains = (
- os.path.join(basepath, 'main'),
- os.path.join(basepath, 'main.yml'),
- os.path.join(basepath, 'main.yaml'),
- os.path.join(basepath, 'main.json'),
- )
- if sum([os.path.isfile(x) for x in mains]) > 1:
- raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
- else:
- for m in mains:
- if os.path.isfile(m):
- return m # exactly one main file
- return mains[0] # zero mains (we still need to return something)
-
- # *************************************************
-
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
- additional_conditions=None, original_file=None, role_name=None):
- ''' handle task and handler include statements '''
-
- results = []
- if tasks is None:
- # support empty handler files, and the like.
- tasks = []
- if additional_conditions is None:
- additional_conditions = []
- if vars is None:
- vars = {}
- if role_params is None:
- role_params = {}
- if default_vars is None:
- default_vars = {}
- if become_vars is None:
- become_vars = {}
-
- old_conditions = list(additional_conditions)
-
- for x in tasks:
-
- # prevent assigning the same conditions to each task on an include
- included_additional_conditions = list(old_conditions)
-
- if not isinstance(x, dict):
- raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
-
- # evaluate privilege escalation vars for current and child tasks
- included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
- if k in x:
- included_become_vars[k] = x[k]
- elif k in become_vars:
- included_become_vars[k] = become_vars[k]
- x[k] = become_vars[k]
-
- task_vars = vars.copy()
- if original_file:
- task_vars['_original_file'] = original_file
-
- if 'meta' in x:
- if x['meta'] == 'flush_handlers':
- if role_name and 'role_name' not in x:
- x['role_name'] = role_name
- results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
- continue
-
- if 'include' in x:
- tokens = split_args(str(x['include']))
- included_additional_conditions = list(additional_conditions)
- include_vars = {}
- for k in x:
- if k.startswith("with_"):
- if original_file:
- offender = " (in %s)" % original_file
- else:
- offender = ""
- utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
- elif k.startswith("when_"):
- utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
- elif k == 'when':
- if isinstance(x[k], (basestring, bool)):
- included_additional_conditions.append(x[k])
- elif type(x[k]) is list:
- included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
- continue
- else:
- include_vars[k] = x[k]
-
- # get any role parameters specified
- role_params = x.get('role_params', {})
-
- # get any role default variables specified
- default_vars = x.get('default_vars', {})
- if not default_vars:
- default_vars = self.default_vars
- else:
- default_vars = utils.combine_vars(self.default_vars, default_vars)
-
- # append the vars defined with the include (from above)
- # as well as the old-style 'vars' element. The old-style
- # vars are given higher precedence here (just in case)
- task_vars = utils.combine_vars(task_vars, include_vars)
- if 'vars' in x:
- task_vars = utils.combine_vars(task_vars, x['vars'])
-
- new_role = None
- if 'role_name' in x:
- new_role = x['role_name']
-
- mv = task_vars.copy()
- for t in tokens[1:]:
- (k,v) = t.split("=", 1)
- v = unquote(v)
- mv[k] = template(self.basedir, v, mv)
- dirname = self.basedir
- if original_file:
- dirname = os.path.dirname(original_file)
-
- # temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- include_file = template(dirname, tokens[0], temp_vars)
- include_filename = utils.path_dwim(dirname, include_file)
-
- data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
- if 'role_name' in x and data is not None:
- for y in data:
- if isinstance(y, dict) and 'include' in y:
- y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
- results += loaded
- elif type(x) == dict:
- task = Task(
- self, x,
- module_vars=task_vars,
- play_vars=self.vars,
- play_file_vars=self.vars_file_vars,
- role_vars=self.role_vars,
- role_params=role_params,
- default_vars=default_vars,
- additional_conditions=list(additional_conditions),
- role_name=role_name
- )
- results.append(task)
- else:
- raise Exception("unexpected task type")
-
- for x in results:
- if self.tags is not None:
- x.tags.extend(self.tags)
-
- return results
-
- # *************************************************
-
- def tasks(self):
- ''' return task objects for this play '''
- return self._tasks
-
- def handlers(self):
- ''' return handler objects for this play '''
- return self._handlers
-
- # *************************************************
-
- def _get_vars(self):
- ''' load the vars section from a play, accounting for all sorts of variable features
- including loading from yaml files, prompting, and conditional includes of the first
- file found in a list. '''
-
- if self.vars is None:
- self.vars = {}
-
- if type(self.vars) not in [dict, list]:
- raise errors.AnsibleError("'vars' section must contain only key/value pairs")
-
- vars = {}
-
- # translate a list of vars into a dict
- if type(self.vars) == list:
- for item in self.vars:
- if getattr(item, 'items', None) is None:
- raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
- k, v = item.items()[0]
- vars[k] = v
- else:
- vars.update(self.vars)
-
- if type(self.vars_prompt) == list:
- for var in self.vars_prompt:
- if not 'name' in var:
- raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
-
- vname = var['name']
- prompt = var.get("prompt", vname)
- default = var.get("default", None)
- private = var.get("private", True)
-
- confirm = var.get("confirm", False)
- encrypt = var.get("encrypt", None)
- salt_size = var.get("salt_size", None)
- salt = var.get("salt", None)
-
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- vname, private, prompt, encrypt, confirm, salt_size, salt, default
- )
-
- elif type(self.vars_prompt) == dict:
- for (vname, prompt) in self.vars_prompt.iteritems():
- prompt_msg = "%s: " % prompt
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- varname=vname, private=False, prompt=prompt_msg, default=None
- )
-
- else:
- raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
-
- if type(self.playbook.extra_vars) == dict:
- vars = utils.combine_vars(vars, self.playbook.extra_vars)
-
- return vars
-
- # *************************************************
-
- def update_vars_files(self, hosts, vault_password=None):
- ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
-
- # now loop through all the hosts...
- for h in hosts:
- self._update_vars_files_for_host(h, vault_password=vault_password)
-
- # *************************************************
-
- def compare_tags(self, tags):
- ''' given a list of tags that the user has specified, return two lists:
- matched_tags: tags were found within the current play and match those given
- by the user
- unmatched_tags: tags that were found within the current play but do not match
- any provided by the user '''
-
- # gather all the tags in all the tasks and handlers into one list
- # FIXME: isn't this in self.tags already?
-
- all_tags = []
- for task in self._tasks:
- if not task.meta:
- all_tags.extend(task.tags)
- for handler in self._handlers:
- all_tags.extend(handler.tags)
-
- # compare the lists of tags using sets and return the matched and unmatched
- all_tags_set = set(all_tags)
- tags_set = set(tags)
-
- matched_tags = all_tags_set.intersection(tags_set)
- unmatched_tags = all_tags_set.difference(tags_set)
-
- a = set(['always'])
- u = set(['untagged'])
- if 'always' in all_tags_set:
- matched_tags = matched_tags.union(a)
- unmatched_tags = all_tags_set.difference(a)
-
- if 'all' in tags_set:
- matched_tags = matched_tags.union(all_tags_set)
- unmatched_tags = set()
-
- if 'tagged' in tags_set:
- matched_tags = all_tags_set.difference(u)
- unmatched_tags = u
-
- if 'untagged' in tags_set and 'untagged' in all_tags_set:
- matched_tags = matched_tags.union(u)
- unmatched_tags = unmatched_tags.difference(u)
-
- return matched_tags, unmatched_tags
-
- # *************************************************
-
- def _late_merge_role_tags(self):
- # build a local dict of tags for roles
- role_tags = {}
- for task in self._ds['tasks']:
- if 'role_name' in task:
- this_role = task['role_name'] + "-" + task['vars']['role_uuid']
-
- if this_role not in role_tags:
- role_tags[this_role] = []
-
- if 'tags' in task['vars']:
- if isinstance(task['vars']['tags'], basestring):
- role_tags[this_role] += shlex.split(task['vars']['tags'])
- else:
- role_tags[this_role] += task['vars']['tags']
-
- # apply each role's tags to its tasks
- for idx, val in enumerate(self._tasks):
- if getattr(val, 'role_name', None) is not None:
- this_role = val.role_name + "-" + val.module_vars['role_uuid']
- if this_role in role_tags:
- self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
-
- # *************************************************
-
- def _update_vars_files_for_host(self, host, vault_password=None):
-
- def generate_filenames(host, inject, filename):
-
- """ Render the raw filename into 3 forms """
-
- # filename2 is the templated version of the filename, which will
- # be fully rendered if any variables contained within it are
- # non-inventory related
- filename2 = template(self.basedir, filename, self.vars)
-
- # filename3 is the same as filename2, but when the host object is
- # available, inventory variables will be expanded as well since the
- # name is templated with the injected variables
- filename3 = filename2
- if host is not None:
- filename3 = template(self.basedir, filename2, inject)
-
- # filename4 is the dwim'd path, but may also be mixed-scope, so we use
- # both play scoped vars and host scoped vars to template the filepath
- if utils.contains_vars(filename3) and host is not None:
- inject.update(self.vars)
- filename4 = template(self.basedir, filename3, inject)
- filename4 = utils.path_dwim(self.basedir, filename4)
- else:
- filename4 = utils.path_dwim(self.basedir, filename3)
-
- return filename2, filename3, filename4
-
-
- def update_vars_cache(host, data, target_filename=None):
-
- """ update a host's varscache with new var data """
-
- self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
- if target_filename:
- self.playbook.callbacks.on_import_for_host(host, target_filename)
-
- def process_files(filename, filename2, filename3, filename4, host=None):
-
- """ pseudo-algorithm for deciding where new vars should go """
-
- data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
- if data:
- if type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
- if host is not None:
- target_filename = None
- if utils.contains_vars(filename2):
- if not utils.contains_vars(filename3):
- target_filename = filename3
- else:
- target_filename = filename4
- update_vars_cache(host, data, target_filename=target_filename)
- else:
- self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
- # we did process this file
- return True
- # we did not process this file
- return False
-
- # Enforce that vars_files is always a list
- if type(self.vars_files) != list:
- self.vars_files = [ self.vars_files ]
-
- # Build an inject if this is a host run started by self.update_vars_files
- if host is not None:
- inject = {}
- inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
- inject.update(self.playbook.SETUP_CACHE.get(host, {}))
- inject.update(self.playbook.VARS_CACHE.get(host, {}))
- else:
- inject = None
-
- processed = []
- for filename in self.vars_files:
- if type(filename) == list:
- # loop over all filenames, loading the first one, and failing if none found
- found = False
- sequence = []
- for real_filename in filename:
- filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
- sequence.append(filename4)
- if os.path.exists(filename4):
- found = True
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
- elif host is not None:
- self.playbook.callbacks.on_not_import_for_host(host, filename4)
- if found:
- break
- if not found and host is not None:
- raise errors.AnsibleError(
- "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
- )
- else:
- # just one filename supplied, load it!
- filename2, filename3, filename4 = generate_filenames(host, inject, filename)
- if utils.contains_vars(filename4):
- continue
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
-
- return processed
diff --git a/v2/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
similarity index 100%
rename from v2/ansible/playbook/playbook_include.py
rename to lib/ansible/playbook/playbook_include.py
diff --git a/v2/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
similarity index 100%
rename from v2/ansible/playbook/role/__init__.py
rename to lib/ansible/playbook/role/__init__.py
diff --git a/v2/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
similarity index 100%
rename from v2/ansible/playbook/role/definition.py
rename to lib/ansible/playbook/role/definition.py
diff --git a/v2/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
similarity index 100%
rename from v2/ansible/playbook/role/include.py
rename to lib/ansible/playbook/role/include.py
diff --git a/v2/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
similarity index 100%
rename from v2/ansible/playbook/role/metadata.py
rename to lib/ansible/playbook/role/metadata.py
diff --git a/v2/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
similarity index 100%
rename from v2/ansible/playbook/role/requirement.py
rename to lib/ansible/playbook/role/requirement.py
diff --git a/v2/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
similarity index 100%
rename from v2/ansible/playbook/taggable.py
rename to lib/ansible/playbook/taggable.py
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 70c1bc8df6b..06060257985 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -15,332 +15,296 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-from ansible import errors
-from ansible import utils
-from ansible.module_utils.splitter import split_args
-import os
-import ansible.utils.template as template
-import sys
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Task(object):
+from ansible.errors import AnsibleError
- _t_common = [
- 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
- 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
- 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
- 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
- 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
- ]
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.parsing.splitter import parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
- __slots__ = [
- 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
- 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
- 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
- ] + _t_common
+from ansible.plugins import module_loader, lookup_loader
- # to prevent typos and such
- VALID_KEYS = frozenset([
- 'async', 'connection', 'include', 'poll',
- ] + _t_common)
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.block import Block
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
- def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
- ''' constructor loads from a task or handler datastructure '''
+__all__ = ['Task']
- # meta directives are used to tell things like ansible/playbook to run
- # operations like handler execution. Meta tasks are not executed
- # normally.
- if 'meta' in ds:
- self.meta = ds['meta']
- self.tags = []
- self.module_vars = module_vars
- self.role_name = role_name
- return
- else:
- self.meta = None
+class Task(Base, Conditional, Taggable, Become):
+ """
+ A task is a language feature that represents a call to a module, with given arguments and other parameters.
+ A handler is a subclass of a task.
- library = os.path.join(play.basedir, 'library')
- if os.path.exists(library):
- utils.plugins.module_finder.add_directory(library)
+ Usage:
- for x in ds.keys():
+ Task.load(datastructure) -> Task
+ Task.something(...)
+ """
- # code to allow for saying "modulename: args" versus "action: modulename args"
- if x in utils.plugins.module_finder:
+ # =================================================================================
+ # ATTRIBUTES
+ # load_ and
+ # validate_
+ # will be used if defined
+ # might be possible to define others
- if 'action' in ds:
- raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
- if isinstance(ds[x], dict):
- if 'args' in ds:
- raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
- ds['args'] = ds[x]
- ds[x] = ''
- elif ds[x] is None:
- ds[x] = ''
- if not isinstance(ds[x], basestring):
- raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
- ds['action'] = x + " " + ds[x]
- ds.pop(x)
+ _args = FieldAttribute(isa='dict', default=dict())
+ _action = FieldAttribute(isa='string')
- # code to allow "with_glob" and to reference a lookup plugin named glob
- elif x.startswith("with_"):
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
+ _always_run = FieldAttribute(isa='bool')
+ _any_errors_fatal = FieldAttribute(isa='bool')
+ _async = FieldAttribute(isa='int', default=0)
+ _changed_when = FieldAttribute(isa='string')
+ _delay = FieldAttribute(isa='int', default=5)
+ _delegate_to = FieldAttribute(isa='string')
+ _failed_when = FieldAttribute(isa='string')
+ _first_available_file = FieldAttribute(isa='list')
+ _ignore_errors = FieldAttribute(isa='bool')
- plugin_name = x.replace("with_","")
- if plugin_name in utils.plugins.lookup_loader:
- ds['items_lookup_plugin'] = plugin_name
- ds['items_lookup_terms'] = ds[x]
- ds.pop(x)
- else:
- raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
+ _loop = FieldAttribute(isa='string', private=True)
+ _loop_args = FieldAttribute(isa='list', private=True)
+ _local_action = FieldAttribute(isa='string')
- elif x in [ 'changed_when', 'failed_when', 'when']:
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
- elif x.startswith("when_"):
- utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
+ # FIXME: this should not be a Task
+ _meta = FieldAttribute(isa='string')
- if 'when' in ds:
- raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
- when_name = x.replace("when_","")
- ds['when'] = "%s %s" % (when_name, ds[x])
- ds.pop(x)
- elif not x in Task.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
+ _name = FieldAttribute(isa='string', default='')
- self.module_vars = module_vars
- self.play_vars = play_vars
- self.play_file_vars = play_file_vars
- self.role_vars = role_vars
- self.role_params = role_params
- self.default_vars = default_vars
- self.play = play
+ _notify = FieldAttribute(isa='list')
+ _poll = FieldAttribute(isa='int')
+ _register = FieldAttribute(isa='string')
+ _retries = FieldAttribute(isa='int', default=1)
+ _run_once = FieldAttribute(isa='bool')
+ _until = FieldAttribute(isa='list') # ?
- # load various attributes
- self.name = ds.get('name', None)
- self.tags = [ 'untagged' ]
- self.register = ds.get('register', None)
- self.environment = ds.get('environment', play.environment)
- self.role_name = role_name
- self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
- self.run_once = utils.boolean(ds.get('run_once', 'false'))
+ def __init__(self, block=None, role=None, task_include=None):
+ ''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
- #Code to allow do until feature in a Task
- if 'until' in ds:
- if not ds.get('register'):
- raise errors.AnsibleError("register keyword is mandatory when using do until feature")
- self.module_vars['delay'] = ds.get('delay', 5)
- self.module_vars['retries'] = ds.get('retries', 3)
- self.module_vars['register'] = ds.get('register', None)
- self.until = ds.get('until')
- self.module_vars['until'] = self.until
+ self._block = block
+ self._role = role
+ self._task_include = task_include
- # rather than simple key=value args on the options line, these represent structured data and the values
- # can be hashes and lists, not just scalars
- self.args = ds.get('args', {})
+ super(Task, self).__init__()
- # get remote_user for task, then play, then playbook
- if ds.get('remote_user') is not None:
- self.remote_user = ds.get('remote_user')
- elif ds.get('remote_user', play.remote_user) is not None:
- self.remote_user = ds.get('remote_user', play.remote_user)
- else:
- self.remote_user = ds.get('remote_user', play.playbook.remote_user)
+ def get_name(self):
+ ''' return the name of the task '''
- # Fail out if user specifies privilege escalation params in conflict
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+ if self._role and self.name:
+ return "%s : %s" % (self._role.get_name(), self.name)
+ elif self.name:
+ return self.name
+ else:
+ flattened_args = self._merge_kv(self.args)
+ if self._role:
+ return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
+ else:
+ return "%s %s" % (self.action, flattened_args)
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name)
+ def _merge_kv(self, ds):
+ if ds is None:
+ return ""
+ elif isinstance(ds, basestring):
+ return ds
+ elif isinstance(ds, dict):
+ buf = ""
+ for (k,v) in ds.iteritems():
+ if k.startswith('_'):
+ continue
+ buf = buf + "%s=%s " % (k,v)
+ buf = buf.strip()
+ return buf
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Task(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
- self.become = utils.boolean(ds.get('become', play.become))
- self.become_method = ds.get('become_method', play.become_method)
- self.become_user = ds.get('become_user', play.become_user)
- self.become_pass = ds.get('become_pass', play.playbook.become_pass)
+ def __repr__(self):
+ ''' returns a human readable representation of the task '''
+ return "TASK: %s" % self.get_name()
- # set only if passed in current task data
- if 'sudo' in ds or 'sudo_user' in ds:
- self.become_method='sudo'
+ def _preprocess_loop(self, ds, new_ds, k, v):
+ ''' take a lookup plugin name and store it correctly '''
- if 'sudo' in ds:
- self.become=ds['sudo']
- del ds['sudo']
+ loop_name = k.replace("with_", "")
+ if new_ds.get('loop') is not None:
+ raise AnsibleError("duplicate loop in task: %s" % loop_name)
+ new_ds['loop'] = loop_name
+ new_ds['loop_args'] = v
+
+ def preprocess_data(self, ds):
+ '''
+ tasks are especially complex arguments so need pre-processing.
+ keep it short.
+ '''
+
+ assert isinstance(ds, dict)
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure suitable for the
+ # attributes of the task class
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # use the args parsing class to determine the action, args,
+ # and the delegate_to value from the various possible forms
+ # supported as legacy
+ args_parser = ModuleArgsParser(task_ds=ds)
+ (action, args, delegate_to) = args_parser.parse()
+
+ new_ds['action'] = action
+ new_ds['args'] = args
+ new_ds['delegate_to'] = delegate_to
+
+ for (k,v) in ds.iteritems():
+ if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
+ # we don't want to re-assign these values, which were
+ # determined by the ModuleArgsParser() above
+ continue
+ elif k.replace("with_", "") in lookup_loader:
+ self._preprocess_loop(ds, new_ds, k, v)
else:
- self.become=True
- if 'sudo_user' in ds:
- self.become_user = ds['sudo_user']
- del ds['sudo_user']
- if 'sudo_pass' in ds:
- self.become_pass = ds['sudo_pass']
- del ds['sudo_pass']
+ new_ds[k] = v
- elif 'su' in ds or 'su_user' in ds:
- self.become_method='su'
+ return super(Task, self).preprocess_data(new_ds)
- if 'su' in ds:
- self.become=ds['su']
+ def post_validate(self, templar):
+ '''
+ Override of base class post_validate, to also do final validation on
+ the block and task include (if any) to which this task belongs.
+ '''
+
+ if self._block:
+ self._block.post_validate(templar)
+ if self._task_include:
+ self._task_include.post_validate(templar)
+
+ super(Task, self).post_validate(templar)
+
+ def get_vars(self):
+ all_vars = self.vars.copy()
+ if self._block:
+ all_vars.update(self._block.get_vars())
+ if self._task_include:
+ all_vars.update(self._task_include.get_vars())
+
+ all_vars.update(self.serialize())
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+ return all_vars
+
+ def copy(self, exclude_block=False):
+ new_me = super(Task, self).copy()
+
+ new_me._block = None
+ if self._block and not exclude_block:
+ new_me._block = self._block.copy()
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me._task_include = None
+ if self._task_include:
+ new_me._task_include = self._task_include.copy()
+
+ return new_me
+
+ def serialize(self):
+ data = super(Task, self).serialize()
+
+ if self._block:
+ data['block'] = self._block.serialize()
+
+ if self._role:
+ data['role'] = self._role.serialize()
+
+ if self._task_include:
+ data['task_include'] = self._task_include.serialize()
+
+ return data
+
+ def deserialize(self, data):
+
+ # import is here to avoid import loops
+ #from ansible.playbook.task_include import TaskInclude
+
+ block_data = data.get('block')
+
+ if block_data:
+ b = Block()
+ b.deserialize(block_data)
+ self._block = b
+ del data['block']
+
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+ del data['role']
+
+ ti_data = data.get('task_include')
+ if ti_data:
+ #ti = TaskInclude()
+ ti = Task()
+ ti.deserialize(ti_data)
+ self._task_include = ti
+ del data['task_include']
+
+ super(Task, self).deserialize(data)
+
+ def evaluate_conditional(self, all_vars):
+ if self._block is not None:
+ if not self._block.evaluate_conditional(all_vars):
+ return False
+ if self._task_include is not None:
+ if not self._task_include.evaluate_conditional(all_vars):
+ return False
+ return super(Task, self).evaluate_conditional(all_vars)
+
+ def set_loader(self, loader):
+ '''
+ Sets the loader on this object and recursively on parent, child objects.
+ This is used primarily after the Task has been serialized/deserialized, which
+ does not preserve the loader.
+ '''
+
+ self._loader = loader
+
+ if self._block:
+ self._block.set_loader(loader)
+ if self._task_include:
+ self._task_include.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+ value = self._attributes[attr]
+ if self._block and (not value or extend):
+ parent_value = getattr(self._block, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- self.become=True
- del ds['su']
- if 'su_user' in ds:
- self.become_user = ds['su_user']
- del ds['su_user']
- if 'su_pass' in ds:
- self.become_pass = ds['su_pass']
- del ds['su_pass']
-
- # Both are defined
- if ('action' in ds) and ('local_action' in ds):
- raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
- # Both are NOT defined
- elif (not 'action' in ds) and (not 'local_action' in ds):
- raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', ''))
- # Only one of them is defined
- elif 'local_action' in ds:
- self.action = ds.get('local_action', '')
- self.delegate_to = '127.0.0.1'
- else:
- self.action = ds.get('action', '')
- self.delegate_to = ds.get('delegate_to', None)
- self.transport = ds.get('connection', ds.get('transport', play.transport))
-
- if isinstance(self.action, dict):
- if 'module' not in self.action:
- raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
- if self.args:
- raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
- self.args = self.action
- self.action = self.args.pop('module')
-
- # delegate_to can use variables
- if not (self.delegate_to is None):
- # delegate_to: localhost should use local transport
- if self.delegate_to in ['127.0.0.1', 'localhost']:
- self.transport = 'local'
-
- # notified by is used by Playbook code to flag which hosts
- # need to run a notifier
- self.notified_by = []
-
- # if no name is specified, use the action line as the name
- if self.name is None:
- self.name = self.action
-
- # load various attributes
- self.when = ds.get('when', None)
- self.changed_when = ds.get('changed_when', None)
- self.failed_when = ds.get('failed_when', None)
-
- # combine the default and module vars here for use in templating
- all_vars = self.default_vars.copy()
- all_vars = utils.combine_vars(all_vars, self.play_vars)
- all_vars = utils.combine_vars(all_vars, self.play_file_vars)
- all_vars = utils.combine_vars(all_vars, self.role_vars)
- all_vars = utils.combine_vars(all_vars, self.module_vars)
- all_vars = utils.combine_vars(all_vars, self.role_params)
-
- self.async_seconds = ds.get('async', 0) # not async by default
- self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
- self.async_seconds = int(self.async_seconds)
- self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
- self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
- self.async_poll_interval = int(self.async_poll_interval)
- self.notify = ds.get('notify', [])
- self.first_available_file = ds.get('first_available_file', None)
-
- self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
- self.items_lookup_terms = ds.get('items_lookup_terms', None)
-
-
- self.ignore_errors = ds.get('ignore_errors', False)
- self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
-
- self.always_run = ds.get('always_run', False)
-
- # action should be a string
- if not isinstance(self.action, basestring):
- raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
-
- # notify can be a string or a list, store as a list
- if isinstance(self.notify, basestring):
- self.notify = [ self.notify ]
-
- # split the action line into a module name + arguments
- try:
- tokens = split_args(self.action)
- except Exception, e:
- if "unbalanced" in str(e):
- raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
- "Make sure quotes are matched or escaped properly")
+ value = parent_value
+ if self._task_include and (not value or extend):
+ parent_value = getattr(self._task_include, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- raise
- if len(tokens) < 1:
- raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
- self.module_name = tokens[0]
- self.module_args = ''
- if len(tokens) > 1:
- self.module_args = " ".join(tokens[1:])
+ value = parent_value
+ return value
- import_tags = self.module_vars.get('tags',[])
- if type(import_tags) in [int,float]:
- import_tags = str(import_tags)
- elif type(import_tags) in [str,unicode]:
- # allow the user to list comma delimited tags
- import_tags = import_tags.split(",")
-
- # handle mutually incompatible options
- incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
- if len(incompatibles) > 1:
- raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
-
- # make first_available_file accessible to Runner code
- if self.first_available_file:
- self.module_vars['first_available_file'] = self.first_available_file
- # make sure that the 'item' variable is set when using
- # first_available_file (issue #8220)
- if 'item' not in self.module_vars:
- self.module_vars['item'] = ''
-
- if self.items_lookup_plugin is not None:
- self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
- self.module_vars['items_lookup_terms'] = self.items_lookup_terms
-
- # allow runner to see delegate_to option
- self.module_vars['delegate_to'] = self.delegate_to
-
- # make some task attributes accessible to Runner code
- self.module_vars['ignore_errors'] = self.ignore_errors
- self.module_vars['register'] = self.register
- self.module_vars['changed_when'] = self.changed_when
- self.module_vars['failed_when'] = self.failed_when
- self.module_vars['always_run'] = self.always_run
-
- # tags allow certain parts of a playbook to be run without running the whole playbook
- apply_tags = ds.get('tags', None)
- if apply_tags is not None:
- if type(apply_tags) in [ str, unicode ]:
- self.tags.append(apply_tags)
- elif type(apply_tags) in [ int, float ]:
- self.tags.append(str(apply_tags))
- elif type(apply_tags) == list:
- self.tags.extend(apply_tags)
- self.tags.extend(import_tags)
-
- if len(self.tags) > 1:
- self.tags.remove('untagged')
-
- if additional_conditions:
- new_conditions = additional_conditions[:]
- if self.when:
- new_conditions.append(self.when)
- self.when = new_conditions
diff --git a/v2/ansible/playbook/vars.py b/lib/ansible/playbook/vars.py
similarity index 100%
rename from v2/ansible/playbook/vars.py
rename to lib/ansible/playbook/vars.py
diff --git a/v2/ansible/playbook/vars_file.py b/lib/ansible/playbook/vars_file.py
similarity index 100%
rename from v2/ansible/playbook/vars_file.py
rename to lib/ansible/playbook/vars_file.py
diff --git a/v2/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
similarity index 100%
rename from v2/ansible/plugins/__init__.py
rename to lib/ansible/plugins/__init__.py
diff --git a/v2/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
similarity index 100%
rename from v2/ansible/plugins/action/__init__.py
rename to lib/ansible/plugins/action/__init__.py
diff --git a/v2/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
similarity index 100%
rename from v2/ansible/plugins/action/add_host.py
rename to lib/ansible/plugins/action/add_host.py
diff --git a/v2/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
similarity index 100%
rename from v2/ansible/plugins/action/assemble.py
rename to lib/ansible/plugins/action/assemble.py
diff --git a/v2/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
similarity index 100%
rename from v2/ansible/plugins/action/assert.py
rename to lib/ansible/plugins/action/assert.py
diff --git a/v2/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py
similarity index 100%
rename from v2/ansible/plugins/action/async.py
rename to lib/ansible/plugins/action/async.py
diff --git a/v2/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
similarity index 100%
rename from v2/ansible/plugins/action/copy.py
rename to lib/ansible/plugins/action/copy.py
diff --git a/v2/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
similarity index 100%
rename from v2/ansible/plugins/action/debug.py
rename to lib/ansible/plugins/action/debug.py
diff --git a/v2/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py
similarity index 100%
rename from v2/ansible/plugins/action/fail.py
rename to lib/ansible/plugins/action/fail.py
diff --git a/v2/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
similarity index 100%
rename from v2/ansible/plugins/action/fetch.py
rename to lib/ansible/plugins/action/fetch.py
diff --git a/v2/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
similarity index 100%
rename from v2/ansible/plugins/action/group_by.py
rename to lib/ansible/plugins/action/group_by.py
diff --git a/v2/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
similarity index 100%
rename from v2/ansible/plugins/action/include_vars.py
rename to lib/ansible/plugins/action/include_vars.py
diff --git a/v2/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
similarity index 100%
rename from v2/ansible/plugins/action/normal.py
rename to lib/ansible/plugins/action/normal.py
diff --git a/v2/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py
similarity index 100%
rename from v2/ansible/plugins/action/patch.py
rename to lib/ansible/plugins/action/patch.py
diff --git a/v2/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
similarity index 100%
rename from v2/ansible/plugins/action/pause.py
rename to lib/ansible/plugins/action/pause.py
diff --git a/v2/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
similarity index 100%
rename from v2/ansible/plugins/action/raw.py
rename to lib/ansible/plugins/action/raw.py
diff --git a/v2/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
similarity index 100%
rename from v2/ansible/plugins/action/script.py
rename to lib/ansible/plugins/action/script.py
diff --git a/v2/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
similarity index 100%
rename from v2/ansible/plugins/action/set_fact.py
rename to lib/ansible/plugins/action/set_fact.py
diff --git a/v2/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
similarity index 100%
rename from v2/ansible/plugins/action/synchronize.py
rename to lib/ansible/plugins/action/synchronize.py
diff --git a/v2/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
similarity index 100%
rename from v2/ansible/plugins/action/template.py
rename to lib/ansible/plugins/action/template.py
diff --git a/v2/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
similarity index 100%
rename from v2/ansible/plugins/action/unarchive.py
rename to lib/ansible/plugins/action/unarchive.py
diff --git a/v2/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
similarity index 100%
rename from v2/ansible/plugins/cache/__init__.py
rename to lib/ansible/plugins/cache/__init__.py
diff --git a/v2/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py
similarity index 100%
rename from v2/ansible/plugins/cache/base.py
rename to lib/ansible/plugins/cache/base.py
diff --git a/v2/ansible/plugins/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py
similarity index 100%
rename from v2/ansible/plugins/cache/memcached.py
rename to lib/ansible/plugins/cache/memcached.py
diff --git a/v2/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
similarity index 100%
rename from v2/ansible/plugins/cache/memory.py
rename to lib/ansible/plugins/cache/memory.py
diff --git a/v2/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py
similarity index 100%
rename from v2/ansible/plugins/cache/redis.py
rename to lib/ansible/plugins/cache/redis.py
diff --git a/v2/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
similarity index 100%
rename from v2/ansible/plugins/callback/__init__.py
rename to lib/ansible/plugins/callback/__init__.py
diff --git a/v2/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
similarity index 100%
rename from v2/ansible/plugins/callback/default.py
rename to lib/ansible/plugins/callback/default.py
diff --git a/v2/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
similarity index 100%
rename from v2/ansible/plugins/callback/minimal.py
rename to lib/ansible/plugins/callback/minimal.py
diff --git a/v2/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py
similarity index 100%
rename from v2/ansible/plugins/connections/__init__.py
rename to lib/ansible/plugins/connections/__init__.py
diff --git a/v2/ansible/plugins/connections/accelerate.py b/lib/ansible/plugins/connections/accelerate.py
similarity index 100%
rename from v2/ansible/plugins/connections/accelerate.py
rename to lib/ansible/plugins/connections/accelerate.py
diff --git a/v2/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py
similarity index 100%
rename from v2/ansible/plugins/connections/chroot.py
rename to lib/ansible/plugins/connections/chroot.py
diff --git a/v2/ansible/plugins/connections/funcd.py b/lib/ansible/plugins/connections/funcd.py
similarity index 100%
rename from v2/ansible/plugins/connections/funcd.py
rename to lib/ansible/plugins/connections/funcd.py
diff --git a/v2/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py
similarity index 100%
rename from v2/ansible/plugins/connections/jail.py
rename to lib/ansible/plugins/connections/jail.py
diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/lib/ansible/plugins/connections/libvirt_lxc.py
similarity index 100%
rename from v2/ansible/plugins/connections/libvirt_lxc.py
rename to lib/ansible/plugins/connections/libvirt_lxc.py
diff --git a/v2/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py
similarity index 100%
rename from v2/ansible/plugins/connections/local.py
rename to lib/ansible/plugins/connections/local.py
diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py
similarity index 100%
rename from v2/ansible/plugins/connections/paramiko_ssh.py
rename to lib/ansible/plugins/connections/paramiko_ssh.py
diff --git a/v2/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py
similarity index 100%
rename from v2/ansible/plugins/connections/ssh.py
rename to lib/ansible/plugins/connections/ssh.py
diff --git a/v2/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py
similarity index 100%
rename from v2/ansible/plugins/connections/winrm.py
rename to lib/ansible/plugins/connections/winrm.py
diff --git a/v2/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py
similarity index 100%
rename from v2/ansible/plugins/connections/zone.py
rename to lib/ansible/plugins/connections/zone.py
diff --git a/v2/ansible/plugins/filter b/lib/ansible/plugins/filter
similarity index 100%
rename from v2/ansible/plugins/filter
rename to lib/ansible/plugins/filter
diff --git a/v2/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
similarity index 100%
rename from v2/ansible/plugins/inventory/__init__.py
rename to lib/ansible/plugins/inventory/__init__.py
diff --git a/v2/ansible/plugins/inventory/aggregate.py b/lib/ansible/plugins/inventory/aggregate.py
similarity index 100%
rename from v2/ansible/plugins/inventory/aggregate.py
rename to lib/ansible/plugins/inventory/aggregate.py
diff --git a/v2/ansible/plugins/inventory/directory.py b/lib/ansible/plugins/inventory/directory.py
similarity index 100%
rename from v2/ansible/plugins/inventory/directory.py
rename to lib/ansible/plugins/inventory/directory.py
diff --git a/v2/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
similarity index 100%
rename from v2/ansible/plugins/inventory/ini.py
rename to lib/ansible/plugins/inventory/ini.py
diff --git a/v2/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py
similarity index 100%
rename from v2/ansible/plugins/lookup/__init__.py
rename to lib/ansible/plugins/lookup/__init__.py
diff --git a/v2/ansible/plugins/lookup/cartesian.py b/lib/ansible/plugins/lookup/cartesian.py
similarity index 100%
rename from v2/ansible/plugins/lookup/cartesian.py
rename to lib/ansible/plugins/lookup/cartesian.py
diff --git a/v2/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
similarity index 100%
rename from v2/ansible/plugins/lookup/csvfile.py
rename to lib/ansible/plugins/lookup/csvfile.py
diff --git a/v2/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py
similarity index 100%
rename from v2/ansible/plugins/lookup/dict.py
rename to lib/ansible/plugins/lookup/dict.py
diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/lib/ansible/plugins/lookup/dnstxt.py
similarity index 100%
rename from v2/ansible/plugins/lookup/dnstxt.py
rename to lib/ansible/plugins/lookup/dnstxt.py
diff --git a/v2/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py
similarity index 100%
rename from v2/ansible/plugins/lookup/env.py
rename to lib/ansible/plugins/lookup/env.py
diff --git a/v2/ansible/plugins/lookup/etcd.py b/lib/ansible/plugins/lookup/etcd.py
similarity index 100%
rename from v2/ansible/plugins/lookup/etcd.py
rename to lib/ansible/plugins/lookup/etcd.py
diff --git a/v2/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py
similarity index 100%
rename from v2/ansible/plugins/lookup/file.py
rename to lib/ansible/plugins/lookup/file.py
diff --git a/v2/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
similarity index 100%
rename from v2/ansible/plugins/lookup/fileglob.py
rename to lib/ansible/plugins/lookup/fileglob.py
diff --git a/v2/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
similarity index 100%
rename from v2/ansible/plugins/lookup/first_found.py
rename to lib/ansible/plugins/lookup/first_found.py
diff --git a/v2/ansible/plugins/lookup/flattened.py b/lib/ansible/plugins/lookup/flattened.py
similarity index 100%
rename from v2/ansible/plugins/lookup/flattened.py
rename to lib/ansible/plugins/lookup/flattened.py
diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py
similarity index 100%
rename from v2/ansible/plugins/lookup/indexed_items.py
rename to lib/ansible/plugins/lookup/indexed_items.py
diff --git a/v2/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
similarity index 100%
rename from v2/ansible/plugins/lookup/inventory_hostnames.py
rename to lib/ansible/plugins/lookup/inventory_hostnames.py
diff --git a/v2/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
similarity index 100%
rename from v2/ansible/plugins/lookup/items.py
rename to lib/ansible/plugins/lookup/items.py
diff --git a/v2/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py
similarity index 100%
rename from v2/ansible/plugins/lookup/lines.py
rename to lib/ansible/plugins/lookup/lines.py
diff --git a/v2/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
similarity index 100%
rename from v2/ansible/plugins/lookup/nested.py
rename to lib/ansible/plugins/lookup/nested.py
diff --git a/v2/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
similarity index 100%
rename from v2/ansible/plugins/lookup/password.py
rename to lib/ansible/plugins/lookup/password.py
diff --git a/v2/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py
similarity index 100%
rename from v2/ansible/plugins/lookup/pipe.py
rename to lib/ansible/plugins/lookup/pipe.py
diff --git a/v2/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
similarity index 100%
rename from v2/ansible/plugins/lookup/random_choice.py
rename to lib/ansible/plugins/lookup/random_choice.py
diff --git a/v2/ansible/plugins/lookup/redis_kv.py b/lib/ansible/plugins/lookup/redis_kv.py
similarity index 100%
rename from v2/ansible/plugins/lookup/redis_kv.py
rename to lib/ansible/plugins/lookup/redis_kv.py
diff --git a/v2/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
similarity index 100%
rename from v2/ansible/plugins/lookup/sequence.py
rename to lib/ansible/plugins/lookup/sequence.py
diff --git a/v2/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py
similarity index 100%
rename from v2/ansible/plugins/lookup/subelements.py
rename to lib/ansible/plugins/lookup/subelements.py
diff --git a/v2/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
similarity index 100%
rename from v2/ansible/plugins/lookup/template.py
rename to lib/ansible/plugins/lookup/template.py
diff --git a/v2/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py
similarity index 100%
rename from v2/ansible/plugins/lookup/together.py
rename to lib/ansible/plugins/lookup/together.py
diff --git a/v2/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py
similarity index 100%
rename from v2/ansible/plugins/lookup/url.py
rename to lib/ansible/plugins/lookup/url.py
diff --git a/v2/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
similarity index 100%
rename from v2/ansible/plugins/shell/__init__.py
rename to lib/ansible/plugins/shell/__init__.py
diff --git a/v2/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py
similarity index 100%
rename from v2/ansible/plugins/shell/csh.py
rename to lib/ansible/plugins/shell/csh.py
diff --git a/v2/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py
similarity index 100%
rename from v2/ansible/plugins/shell/fish.py
rename to lib/ansible/plugins/shell/fish.py
diff --git a/v2/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
similarity index 100%
rename from v2/ansible/plugins/shell/powershell.py
rename to lib/ansible/plugins/shell/powershell.py
diff --git a/v2/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
similarity index 100%
rename from v2/ansible/plugins/shell/sh.py
rename to lib/ansible/plugins/shell/sh.py
diff --git a/v2/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py
similarity index 100%
rename from v2/ansible/plugins/strategies/__init__.py
rename to lib/ansible/plugins/strategies/__init__.py
diff --git a/v2/ansible/plugins/strategies/free.py b/lib/ansible/plugins/strategies/free.py
similarity index 100%
rename from v2/ansible/plugins/strategies/free.py
rename to lib/ansible/plugins/strategies/free.py
diff --git a/v2/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py
similarity index 100%
rename from v2/ansible/plugins/strategies/linear.py
rename to lib/ansible/plugins/strategies/linear.py
diff --git a/v2/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py
similarity index 100%
rename from v2/ansible/plugins/vars/__init__.py
rename to lib/ansible/plugins/vars/__init__.py
diff --git a/v2/ansible/template/__init__.py b/lib/ansible/template/__init__.py
similarity index 100%
rename from v2/ansible/template/__init__.py
rename to lib/ansible/template/__init__.py
diff --git a/v2/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
similarity index 100%
rename from v2/ansible/template/safe_eval.py
rename to lib/ansible/template/safe_eval.py
diff --git a/v2/ansible/template/template.py b/lib/ansible/template/template.py
similarity index 100%
rename from v2/ansible/template/template.py
rename to lib/ansible/template/template.py
diff --git a/v2/ansible/template/vars.py b/lib/ansible/template/vars.py
similarity index 100%
rename from v2/ansible/template/vars.py
rename to lib/ansible/template/vars.py
diff --git a/v2/test-requirements.txt b/lib/ansible/test-requirements.txt
similarity index 100%
rename from v2/test-requirements.txt
rename to lib/ansible/test-requirements.txt
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index 7ed07a54c84..ae8ccff5952 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -15,1646 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import errno
-import sys
-import re
-import os
-import shlex
-import yaml
-import copy
-import optparse
-import operator
-from ansible import errors
-from ansible import __version__
-from ansible.utils.display_functions import *
-from ansible.utils.plugins import *
-from ansible.utils.su_prompts import *
-from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
-from ansible.callbacks import display
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils.unicode import to_bytes, to_unicode
-import ansible.constants as C
-import ast
-import time
-import StringIO
-import stat
-import termios
-import tty
-import pipes
-import random
-import difflib
-import warnings
-import traceback
-import getpass
-import sys
-import subprocess
-import contextlib
-
-from vault import VaultLib
-
-VERBOSITY=0
-
-MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
-
-# caching the compilation of the regex used
-# to check for lookup calls within data
-LOOKUP_REGEX = re.compile(r'lookup\s*\(')
-PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
-CODE_REGEX = re.compile(r'(?:{%|%})')
-
-
-try:
- # simplejson can be much faster if it's available
- import simplejson as json
-except ImportError:
- import json
-
-try:
- from yaml import CSafeLoader as Loader
-except ImportError:
- from yaml import SafeLoader as Loader
-
-PASSLIB_AVAILABLE = False
-try:
- import passlib.hash
- PASSLIB_AVAILABLE = True
-except:
- pass
-
-try:
- import builtin
-except ImportError:
- import __builtin__ as builtin
-
-KEYCZAR_AVAILABLE=False
-try:
- try:
- # some versions of pycrypto may not have this?
- from Crypto.pct_warnings import PowmInsecureWarning
- except ImportError:
- PowmInsecureWarning = RuntimeWarning
-
- with warnings.catch_warnings(record=True) as warning_handler:
- warnings.simplefilter("error", PowmInsecureWarning)
- try:
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- except PowmInsecureWarning:
- system_warning(
- "The version of gmp you have installed has a known issue regarding " + \
- "timing vulnerabilities when used with pycrypto. " + \
- "If possible, you should update it (i.e. yum update gmp)."
- )
- warnings.resetwarnings()
- warnings.simplefilter("ignore")
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- KEYCZAR_AVAILABLE=True
-except ImportError:
- pass
-
-
-###############################################################
-# Abstractions around keyczar
-###############################################################
-
-def key_for_hostname(hostname):
- # fireball mode is an implementation of ansible firing up zeromq via SSH
- # to use no persistent daemons or key management
-
- if not KEYCZAR_AVAILABLE:
- raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
-
- key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
- if not os.path.exists(key_path):
- os.makedirs(key_path, mode=0700)
- os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
- elif not os.path.isdir(key_path):
- raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
-
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
-
- key_path = os.path.join(key_path, hostname)
-
- # use new AES keys every 2 hours, which means fireball must not allow running for longer either
- if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
- key = AesKey.Generate()
- fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
- fh = os.fdopen(fd, 'w')
- fh.write(str(key))
- fh.close()
- return key
- else:
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
- fh = open(key_path)
- key = AesKey.Read(fh.read())
- fh.close()
- return key
-
-def encrypt(key, msg):
- return key.Encrypt(msg)
-
-def decrypt(key, msg):
- try:
- return key.Decrypt(msg)
- except key_errors.InvalidSignatureError:
- raise errors.AnsibleError("decryption failed")
-
-###############################################################
-# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
-###############################################################
-
-def read_vault_file(vault_password_file):
- """Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
- if vault_password_file:
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError), e:
- raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
-
- return vault_pass
- else:
- return None
-
-def err(msg):
- ''' print an error message to stderr '''
-
- print >> sys.stderr, msg
-
-def exit(msg, rc=1):
- ''' quit with an error to stdout and a failure code '''
-
- err(msg)
- sys.exit(rc)
-
-def jsonify(result, format=False):
- ''' format JSON output (uncompressed or uncompressed) '''
-
- if result is None:
- return "{}"
- result2 = result.copy()
- for key, value in result2.items():
- if type(value) is str:
- result2[key] = value.decode('utf-8', 'ignore')
-
- indent = None
- if format:
- indent = 4
-
- try:
- return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
- except UnicodeDecodeError:
- return json.dumps(result2, sort_keys=True, indent=indent)
-
-def write_tree_file(tree, hostname, buf):
- ''' write something into treedir/hostname '''
-
- # TODO: might be nice to append playbook runs per host in a similar way
- # in which case, we'd want append mode.
- path = os.path.join(tree, hostname)
- fd = open(path, "w+")
- fd.write(buf)
- fd.close()
-
-def is_failed(result):
- ''' is a given JSON result a failed result? '''
-
- return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
-
-def is_changed(result):
- ''' is a given JSON result a changed result? '''
-
- return (result.get('changed', False) in [ True, 'True', 'true'])
-
-def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
- from ansible.utils import template
-
- if conditional is None or conditional == '':
- return True
-
- if isinstance(conditional, list):
- for x in conditional:
- if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
- return False
- return True
-
- if not isinstance(conditional, basestring):
- return conditional
-
- conditional = conditional.replace("jinja2_compare ","")
- # allow variable names
- if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
- conditional = to_unicode(inject[conditional], nonstring='simplerepr')
- conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = template.template(basedir, presented, inject)
- val = conditional.strip()
- if val == presented:
- # the templating failed, meaning most likely a
- # variable was undefined. If we happened to be
- # looking for an undefined variable, return True,
- # otherwise fail
- if "is undefined" in conditional:
- return True
- elif "is defined" in conditional:
- return False
- else:
- raise errors.AnsibleError("error while evaluating conditional: %s" % original)
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
-def unfrackpath(path):
- '''
- returns a path that is free of symlinks, environment
- variables, relative path traversals and symbols (~)
- example:
- '$HOME/../../var/mail' becomes '/var/spool/mail'
- '''
- return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
-
-def prepare_writeable_dir(tree,mode=0777):
- ''' make sure a directory exists and is writeable '''
-
- # modify the mode to ensure the owner at least
- # has read/write access to this directory
- mode |= 0700
-
- # make sure the tree path is always expanded
- # and normalized and free of symlinks
- tree = unfrackpath(tree)
-
- if not os.path.exists(tree):
- try:
- os.makedirs(tree, mode)
- except (IOError, OSError), e:
- raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
- if not os.access(tree, os.W_OK):
- raise errors.AnsibleError("Cannot write to path %s" % tree)
- return tree
-
-def path_dwim(basedir, given):
- '''
- make relative paths work like folks expect.
- '''
-
- if given.startswith("'"):
- given = given[1:-1]
-
- if given.startswith("/"):
- return os.path.abspath(given)
- elif given.startswith("~"):
- return os.path.abspath(os.path.expanduser(given))
- else:
- if basedir is None:
- basedir = "."
- return os.path.abspath(os.path.join(basedir, given))
-
-def path_dwim_relative(original, dirname, source, playbook_base, check=True):
- ''' find one file in a directory one level up in a dir named dirname relative to current '''
- # (used by roles code)
-
- from ansible.utils import template
-
-
- basedir = os.path.dirname(original)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
- else:
- template2 = os.path.join(basedir, '..', dirname, source)
- source2 = path_dwim(basedir, template2)
- if os.path.exists(source2):
- return source2
- obvious_local_path = path_dwim(playbook_base, source)
- if os.path.exists(obvious_local_path):
- return obvious_local_path
- if check:
- raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
- return source2 # which does not exist
-
-def repo_url_to_role_name(repo_url):
- # gets the role name out of a repo like
- # http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
-
-def role_spec_parse(role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
- # and returns a list of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- role_spec = role_spec.strip()
- role_version = ''
- default_role_versions = dict(git='master', hg='tip')
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
- if len(tokens) >= 2:
- role_version = tokens[1]
- if len(tokens) == 3:
- role_name = tokens[2]
- else:
- role_name = repo_url_to_role_name(tokens[0])
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
- return dict(scm=scm, src=role_url, version=role_version, name=role_name)
-
-
-def role_yaml_parse(role):
- if 'role' in role:
- # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = role_spec_parse(role['role'])
- if isinstance(role_info, dict):
- # Warning: Slight change in behaviour here. name may be being
- # overloaded. Previously, name was only a parameter to the role.
- # Now it is both a parameter to the role and the name that
- # ansible-galaxy will install under on the local system.
- if 'name' in role and 'name' in role_info:
- del role_info['name']
- role.update(role_info)
- else:
- # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
- if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
- role["src"] = "git+" + role["src"]
-
- if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
-
- if 'name' not in role:
- role["name"] = repo_url_to_role_name(role["src"])
-
- if 'version' not in role:
- role['version'] = ''
-
- if 'scm' not in role:
- role['scm'] = None
-
- return role
-
-
-def json_loads(data):
- ''' parse a JSON string and return a data structure '''
- try:
- loaded = json.loads(data)
- except ValueError,e:
- raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
-
- return loaded
-
-def _clean_data(orig_data, from_remote=False, from_inventory=False):
- ''' remove jinja2 template tags from a string '''
-
- if not isinstance(orig_data, basestring):
- return orig_data
-
- # when the data is marked as having come from a remote, we always
- # replace any print blocks (ie. {{var}}), however when marked as coming
- # from inventory we only replace print blocks that contain a call to
- # a lookup plugin (ie. {{lookup('foo','bar'))}})
- replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
-
- regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
-
- with contextlib.closing(StringIO.StringIO(orig_data)) as data:
- # these variables keep track of opening block locations, as we only
- # want to replace matched pairs of print/block tags
- print_openings = []
- block_openings = []
- for mo in regex.finditer(orig_data):
- token = mo.group(0)
- token_start = mo.start(0)
-
- if token[0] == '{':
- if token == '{%':
- block_openings.append(token_start)
- elif token == '{{':
- print_openings.append(token_start)
-
- elif token[1] == '}':
- prev_idx = None
- if token == '%}' and block_openings:
- prev_idx = block_openings.pop()
- elif token == '}}' and print_openings:
- prev_idx = print_openings.pop()
-
- if prev_idx is not None:
- # replace the opening
- data.seek(prev_idx, os.SEEK_SET)
- data.write('{#')
- # replace the closing
- data.seek(token_start, os.SEEK_SET)
- data.write('#}')
-
- else:
- assert False, 'Unhandled regex match'
-
- return data.getvalue()
-
-def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
- '''
- walk a complex data structure, and use _clean_data() to
- remove any template tags that may exist
- '''
- if not from_remote and not from_inventory:
- raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
- if isinstance(orig_data, dict):
- data = orig_data.copy()
- for key in data:
- new_key = _clean_data_struct(key, from_remote, from_inventory)
- new_val = _clean_data_struct(data[key], from_remote, from_inventory)
- if key != new_key:
- del data[key]
- data[new_key] = new_val
- elif isinstance(orig_data, list):
- data = orig_data[:]
- for i in range(0, len(data)):
- data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
- elif isinstance(orig_data, basestring):
- data = _clean_data(orig_data, from_remote, from_inventory)
- else:
- data = orig_data
- return data
-
-def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
- ''' this version for module return data only '''
-
- orig_data = raw_data
-
- # ignore stuff like tcgetattr spewage or other warnings
- data = filter_leading_non_json_lines(raw_data)
-
- try:
- results = json.loads(data)
- except:
- if no_exceptions:
- return dict(failed=True, parsed=False, msg=raw_data)
- else:
- raise
-
- if from_remote:
- results = _clean_data_struct(results, from_remote, from_inventory)
-
- return results
-
-def serialize_args(args):
- '''
- Flattens a dictionary args to a k=v string
- '''
- module_args = ""
- for (k,v) in args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- elif isinstance(v, bool):
- module_args = "%s=%s %s" % (k, str(v), module_args)
- return module_args.strip()
-
-def merge_module_args(current_args, new_args):
- '''
- merges either a dictionary or string of k=v pairs with another string of k=v pairs,
- and returns a new k=v string without duplicates.
- '''
- if not isinstance(current_args, basestring):
- raise errors.AnsibleError("expected current_args to be a basestring")
- # we use parse_kv to split up the current args into a dictionary
- final_args = parse_kv(current_args)
- if isinstance(new_args, dict):
- final_args.update(new_args)
- elif isinstance(new_args, basestring):
- new_args_kv = parse_kv(new_args)
- final_args.update(new_args_kv)
- return serialize_args(final_args)
-
-def parse_yaml(data, path_hint=None):
- ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
-
- stripped_data = data.lstrip()
- loaded = None
- if stripped_data.startswith("{") or stripped_data.startswith("["):
- # since the line starts with { or [ we can infer this is a JSON document.
- try:
- loaded = json.loads(data)
- except ValueError, ve:
- if path_hint:
- raise errors.AnsibleError(path_hint + ": " + str(ve))
- else:
- raise errors.AnsibleError(str(ve))
- else:
- # else this is pretty sure to be a YAML document
- loaded = yaml.load(data, Loader=Loader)
-
- return loaded
-
-def process_common_errors(msg, probline, column):
- replaced = probline.replace(" ","")
-
- if ":{{" in replaced and "}}" in replaced:
- msg = msg + """
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
- return msg
-
- elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
- msg = msg + """
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is just add some quotes around the colon, or quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-
-
-"""
- return msg
- else:
- parts = probline.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
- if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
- unbalanced = True
- if match:
- msg = msg + """
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-or equivalently:
-
- when: "'ok' in result.stdout"
-
-"""
- return msg
-
- if unbalanced:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-
-"""
- return msg
-
- return msg
-
-def process_yaml_error(exc, data, path=None, show_content=True):
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- if show_content:
- if mark.line -1 >= 0:
- before_probline = data.split("\n")[mark.line-1]
- else:
- before_probline = ''
- probline = data.split("\n")[mark.line]
- arrow = " " * mark.column + "^"
- msg = """Syntax Error while loading YAML script, %s
-Note: The error may actually appear before this position: line %s, column %s
-
-%s
-%s
-%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
-
- unquoted_var = None
- if '{{' in probline and '}}' in probline:
- if '"{{' not in probline or "'{{" not in probline:
- unquoted_var = True
-
- if not unquoted_var:
- msg = process_common_errors(msg, probline, mark.column)
- else:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-
-"""
- else:
- # most likely displaying a file with sensitive content,
- # so don't show any of the actual lines of yaml just the
- # line number itself
- msg = """Syntax error while loading YAML script, %s
-The error appears to have been on line %s, column %s, but may actually
-be before there depending on the exact syntax problem.
-""" % (path, mark.line + 1, mark.column + 1)
-
- else:
- # No problem markers means we have to throw a generic
- # "stuff messed up" type message. Sry bud.
- if path:
- msg = "Could not parse YAML. Check over %s again." % path
- else:
- msg = "Could not parse YAML."
- raise errors.AnsibleYAMLValidationFailed(msg)
-
-
-def parse_yaml_from_file(path, vault_password=None):
- ''' convert a yaml file to a data structure '''
-
- data = None
- show_content = True
-
- try:
- data = open(path).read()
- except IOError:
- raise errors.AnsibleError("file could not read: %s" % path)
-
- vault = VaultLib(password=vault_password)
- if vault.is_encrypted(data):
- # if the file is encrypted and no password was specified,
- # the decrypt call would throw an error, but we check first
- # since the decrypt function doesn't know the file name
- if vault_password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
- data = vault.decrypt(data)
- show_content = False
-
- try:
- return parse_yaml(data, path_hint=path)
- except yaml.YAMLError, exc:
- process_yaml_error(exc, data, path, show_content)
-
-def parse_kv(args):
- ''' convert a string of key/value items to a dict '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except ValueError, ve:
- if 'no closing quotation' in str(ve).lower():
- raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
- else:
- raise
- for x in vargs:
- if "=" in x:
- k, v = x.split("=",1)
- options[k.strip()] = unquote(v.strip())
- return options
-
-def _validate_both_dicts(a, b):
-
- if not (isinstance(a, dict) and isinstance(b, dict)):
- raise errors.AnsibleError(
- "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
- )
-
-def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- # we check here as well as in combine_vars() since this
- # function can work recursively with nested dicts
- _validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
-def default(value, function):
- ''' syntactic sugar around lazy evaluation of defaults '''
- if value is None:
- return function()
- return value
-
-
-def _git_repo_info(repo_path):
- ''' returns a string containing git branch, commit id and commit date '''
- result = None
- if os.path.exists(repo_path):
- # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
- if os.path.isfile(repo_path):
- try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
- # There is a possibility the .git file to have an absolute path.
- if os.path.isabs(gitdir):
- repo_path = gitdir
- else:
- repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
- return ''
- f = open(os.path.join(repo_path, "HEAD"))
- branch = f.readline().split('/')[-1].rstrip("\n")
- f.close()
- branch_path = os.path.join(repo_path, "refs", "heads", branch)
- if os.path.exists(branch_path):
- f = open(branch_path)
- commit = f.readline()[:10]
- f.close()
- else:
- # detached HEAD
- commit = branch[:10]
- branch = 'detached HEAD'
- branch_path = os.path.join(repo_path, "HEAD")
-
- date = time.localtime(os.stat(branch_path).st_mtime)
- if time.daylight == 0:
- offset = time.timezone
- else:
- offset = time.altzone
- result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
- time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
- else:
- result = ''
- return result
-
-
-def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
- repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
- if not os.path.exists(submodules):
- return result
- f = open(submodules)
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
- submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- f.close()
- return result
-
-
-def version(prog):
- result = "{0} {1}".format(prog, __version__)
- gitinfo = _gitinfo()
- if gitinfo:
- result = result + " {0}".format(gitinfo)
- result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
- return result
-
-def version_info(gitinfo=False):
- if gitinfo:
- # expensive call, user with care
- ansible_version_string = version('')
- else:
- ansible_version_string = __version__
- ansible_version = ansible_version_string.split()[0]
- ansible_versions = ansible_version.split('.')
- for counter in range(len(ansible_versions)):
- if ansible_versions[counter] == "":
- ansible_versions[counter] = 0
- try:
- ansible_versions[counter] = int(ansible_versions[counter])
- except:
- pass
- if len(ansible_versions) < 3:
- for counter in range(len(ansible_versions), 3):
- ansible_versions.append(0)
- return {'string': ansible_version_string.strip(),
- 'full': ansible_version,
- 'major': ansible_versions[0],
- 'minor': ansible_versions[1],
- 'revision': ansible_versions[2]}
-
-def getch():
- ''' read in a single character '''
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- ch = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- return ch
-
-def sanitize_output(arg_string):
- ''' strips private info out of a string '''
-
- private_keys = ('password', 'login_password')
-
- output = []
- for part in arg_string.split():
- try:
- (k, v) = part.split('=', 1)
- except ValueError:
- v = heuristic_log_sanitize(part)
- output.append(v)
- continue
-
- if k in private_keys:
- v = 'VALUE_HIDDEN'
- else:
- v = heuristic_log_sanitize(v)
- output.append('%s=%s' % (k, v))
-
- output = ' '.join(output)
- return output
-
-
-####################################################################
-# option handling code for /usr/bin/ansible and ansible-playbook
-# below this line
-
-class SortedOptParser(optparse.OptionParser):
- '''Optparser which sorts the options by opt before outputting --help'''
-
- def format_help(self, formatter=None):
- self.option_list.sort(key=operator.methodcaller('get_opt_string'))
- return optparse.OptionParser.format_help(self, formatter=None)
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
- ''' create an options parser for any ansible script '''
-
- parser = SortedOptParser(usage, version=version("%prog"))
- parser.add_option('-v','--verbose', default=False, action="callback",
- callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
-
- parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
- help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
- default=constants.DEFAULT_HOST_LIST)
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
- help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
- parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
- help='ask for SSH password')
- parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
- help='use this file to authenticate the connection')
- parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
- help='ask for vault password')
- parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
- dest='vault_password_file', help="vault password file")
- parser.add_option('--list-hosts', dest='listhosts', action='store_true',
- help='outputs a list of matching hosts; does not execute anything else')
- parser.add_option('-M', '--module-path', dest='module_path',
- help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
- default=None)
-
- if subset_opts:
- parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
- help='further limit selected hosts to an additional pattern')
-
- parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
- dest='timeout',
- help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
-
- if output_opts:
- parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
- help='condense output')
- parser.add_option('-t', '--tree', dest='tree', default=None,
- help='log output to this directory')
-
- if runas_opts:
- # priv user defaults to root later on to enable detecting when this option was given here
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password (deprecated, use become)')
- parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
- help='ask for su password (deprecated, use become)')
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
- help="run operations with sudo (nopasswd) (deprecated, use become)")
- parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root) (deprecated, use become)')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
- help='run operations with su (deprecated, use become)')
- parser.add_option('-R', '--su-user', default=None,
- help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
-
- # consolidated privilege escalation (become)
- parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
- help="run operations with become (nopasswd implied)")
- parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
- help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
- parser.add_option('--become-user', default=None, dest='become_user', type='string',
- help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
- parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
- help='ask for privilege escalation password')
-
-
- if connect_opts:
- parser.add_option('-c', '--connection', dest='connection',
- default=constants.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
-
- if async_opts:
- parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
- dest='poll_interval',
- help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
- parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
- help='run asynchronously, failing after X seconds (default=N/A)')
-
- if check_opts:
- parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
- help="don't make any changes; instead, try to predict some of the changes that may occur"
- )
-
- if diff_opts:
- parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
- help="when changing (small) files and templates, show the differences in those files; works great with --check"
- )
-
- return parser
-
-def parse_extra_vars(extra_vars_opts, vault_pass):
- extra_vars = {}
- for extra_vars_opt in extra_vars_opts:
- extra_vars_opt = to_unicode(extra_vars_opt)
- if extra_vars_opt.startswith(u"@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
- elif extra_vars_opt and extra_vars_opt[0] in u'[{':
- # Arguments as YAML
- extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
- else:
- # Arguments as Key-value
- extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
- return extra_vars
-
-def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
-
- vault_pass = None
- new_vault_pass = None
-
- if ask_vault_pass:
- vault_pass = getpass.getpass(prompt="Vault password: ")
-
- if ask_vault_pass and confirm_vault:
- vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
- if vault_pass != vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- if ask_new_vault_pass:
- new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-
- if ask_new_vault_pass and confirm_new:
- new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
- if new_vault_pass != new_vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- # enforce no newline chars at the end of passwords
- if vault_pass:
- vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
- if new_vault_pass:
- new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
-
- return vault_pass, new_vault_pass
-
-def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
- sshpass = None
- becomepass = None
- vaultpass = None
- become_prompt = ''
-
- if ask_pass:
- sshpass = getpass.getpass(prompt="SSH password: ")
- become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
- if sshpass:
- sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- else:
- become_prompt = "%s password: " % become_method.upper()
-
- if become_ask_pass:
- becomepass = getpass.getpass(prompt=become_prompt)
- if ask_pass and becomepass == '':
- becomepass = sshpass
- if becomepass:
- becomepass = to_bytes(becomepass)
-
- if ask_vault_pass:
- vaultpass = getpass.getpass(prompt="Vault password: ")
- if vaultpass:
- vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
-
- return (sshpass, becomepass, vaultpass)
-
-
-def choose_pass_prompt(options):
-
- if options.ask_su_pass:
- return 'su'
- elif options.ask_sudo_pass:
- return 'sudo'
-
- return options.become_method
-
-def normalize_become_options(options):
-
- options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
- options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
-
- if options.become:
- pass
- elif options.sudo:
- options.become = True
- options.become_method = 'sudo'
- elif options.su:
- options.become = True
- options.become_method = 'su'
-
-
-def do_encrypt(result, encrypt, salt_size=None, salt=None):
- if PASSLIB_AVAILABLE:
- try:
- crypt = getattr(passlib.hash, encrypt)
- except:
- raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
-
- if salt_size:
- result = crypt.encrypt(result, salt_size=salt_size)
- elif salt:
- result = crypt.encrypt(result, salt=salt)
- else:
- result = crypt.encrypt(result)
- else:
- raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
-
- return result
-
-def last_non_blank_line(buf):
-
- all_lines = buf.splitlines()
- all_lines.reverse()
- for line in all_lines:
- if (len(line) > 0):
- return line
- # shouldn't occur unless there's no output
- return ""
-
-def filter_leading_non_json_lines(buf):
- '''
- used to avoid random output from SSH at the top of JSON output, like messages from
- tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
-
- need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
- filter only leading lines since multiline JSON is valid.
- '''
-
- filtered_lines = StringIO.StringIO()
- stop_filtering = False
- for line in buf.splitlines():
- if stop_filtering or line.startswith('{') or line.startswith('['):
- stop_filtering = True
- filtered_lines.write(line + '\n')
- return filtered_lines.getvalue()
-
-def boolean(value):
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
- """
- helper function for connection plugins to create privilege escalation commands
- """
-
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'BECOME-SUCCESS-%s' % randbits
- prompt = None
- becomecmd = None
-
- shell = shell or '$SHELL'
-
- if method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = exe or C.DEFAULT_SUDO_EXE
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'su':
- exe = exe or C.DEFAULT_SU_EXE
- flags = flags or C.DEFAULT_SU_FLAGS
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'pbrun':
- prompt = 'assword:'
- exe = exe or 'pbrun'
- flags = flags or ''
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- elif method == 'pfexec':
- exe = exe or 'pfexec'
- flags = flags or ''
- # No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- if becomecmd is None:
- raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
-
- return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
-
-
-def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
- """
- helper function for connection plugins to create sudo commands
- """
- return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
-
-
-def make_su_cmd(su_user, executable, cmd):
- """
- Helper function for connection plugins to create direct su commands
- """
- return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
-
-def get_diff(diff):
- # called by --diff usage in playbook and runner via callbacks
- # include names in diffs 'before' and 'after' and do diff -U 10
-
- try:
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- ret = []
- if 'dst_binary' in diff:
- ret.append("diff skipped: destination file appears to be binary\n")
- if 'src_binary' in diff:
- ret.append("diff skipped: source file appears to be binary\n")
- if 'dst_larger' in diff:
- ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
- if 'src_larger' in diff:
- ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
- if 'before' in diff and 'after' in diff:
- if 'before_header' in diff:
- before_header = "before: %s" % diff['before_header']
- else:
- before_header = 'before'
- if 'after_header' in diff:
- after_header = "after: %s" % diff['after_header']
- else:
- after_header = 'after'
- differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
- for line in list(differ):
- ret.append(line)
- return u"".join(ret)
- except UnicodeDecodeError:
- return ">> the files are different, but the diff library cannot compare unicode strings"
-
-def is_list_of_strings(items):
- for x in items:
- if not isinstance(x, basestring):
- return False
- return True
-
-def list_union(a, b):
- result = []
- for x in a:
- if x not in result:
- result.append(x)
- for x in b:
- if x not in result:
- result.append(x)
- return result
-
-def list_intersection(a, b):
- result = []
- for x in a:
- if x in b and x not in result:
- result.append(x)
- return result
-
-def list_difference(a, b):
- result = []
- for x in a:
- if x not in b and x not in result:
- result.append(x)
- for x in b:
- if x not in a and x not in result:
- result.append(x)
- return result
-
-def contains_vars(data):
- '''
- returns True if the data contains a variable pattern
- '''
- return "$" in data or "{{" in data
-
-def safe_eval(expr, locals={}, include_exceptions=False):
- '''
- This is intended for allowing things like:
- with_items: a_list_variable
-
- Where Jinja2 would return a string but we do not want to allow it to
- call functions (outside of Jinja2, where the env is constrained). If
- the input data to this function came from an untrusted (remote) source,
- it should first be run through _clean_data_struct() to ensure the data
- is further sanitized prior to evaluation.
-
- Based on:
- http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
- '''
-
- # this is the whitelist of AST nodes we are going to
- # allow in the evaluation. Any node type other than
- # those listed here will raise an exception in our custom
- # visitor class defined below.
- SAFE_NODES = set(
- (
- ast.Add,
- ast.BinOp,
- ast.Call,
- ast.Compare,
- ast.Dict,
- ast.Div,
- ast.Expression,
- ast.List,
- ast.Load,
- ast.Mult,
- ast.Num,
- ast.Name,
- ast.Str,
- ast.Sub,
- ast.Tuple,
- ast.UnaryOp,
- )
- )
-
- # AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
- SAFE_NODES.union(
- set(
- (ast.Set,)
- )
- )
-
- filter_list = []
- for filter in filter_loader.all():
- filter_list.extend(filter.filters().keys())
-
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
-
- class CleansingNodeVisitor(ast.NodeVisitor):
- def generic_visit(self, node, inside_call=False):
- if type(node) not in SAFE_NODES:
- raise Exception("invalid expression (%s)" % expr)
- elif isinstance(node, ast.Call):
- inside_call = True
- elif isinstance(node, ast.Name) and inside_call:
- if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
- raise Exception("invalid function: %s" % node.id)
- # iterate over all child nodes
- for child_node in ast.iter_child_nodes(node):
- self.generic_visit(child_node, inside_call)
-
- if not isinstance(expr, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (expr, None)
- return expr
-
- cnv = CleansingNodeVisitor()
- try:
- parsed_tree = ast.parse(expr, mode='eval')
- cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
-
- if include_exceptions:
- return (result, None)
- else:
- return result
- except SyntaxError, e:
- # special handling for syntax errors, we just return
- # the expression string back as-is
- if include_exceptions:
- return (expr, None)
- return expr
- except Exception, e:
- if include_exceptions:
- return (expr, e)
- return expr
-
-
-def listify_lookup_plugin_terms(terms, basedir, inject):
-
- from ansible.utils import template
-
- if isinstance(terms, basestring):
- # someone did:
- # with_items: alist
- # OR
- # with_items: {{ alist }}
-
- stripped = terms.strip()
- if not (stripped.startswith('{') or stripped.startswith('[')) and \
- not stripped.startswith("/") and \
- not stripped.startswith('set([') and \
- not LOOKUP_REGEX.search(terms):
- # if not already a list, get ready to evaluate with Jinja2
- # not sure why the "/" is in above code :)
- try:
- new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
- if isinstance(new_terms, basestring) and "{{" in new_terms:
- pass
- else:
- terms = new_terms
- except:
- pass
-
- if '{' in terms or '[' in terms:
- # Jinja2 already evaluated a variable to a list.
- # Jinja2-ified list needs to be converted back to a real type
- # TODO: something a bit less heavy than eval
- return safe_eval(terms)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- return terms
-
-def combine_vars(a, b):
-
- _validate_both_dicts(a, b)
-
- if C.DEFAULT_HASH_BEHAVIOUR == "merge":
- return merge_hash(a, b)
- else:
- return dict(a.items() + b.items())
-
-def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
- '''Return a random password string of length containing only chars.'''
-
- password = []
- while len(password) < length:
- new_char = os.urandom(1)
- if new_char in chars:
- password.append(new_char)
-
- return ''.join(password)
-
-def before_comment(msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
-def load_vars(basepath, results, vault_password=None):
- """
- Load variables from any potential yaml filename combinations of basepath,
- returning result.
- """
-
- paths_to_check = [ "".join([basepath, ext])
- for ext in C.YAML_FILENAME_EXTENSIONS ]
-
- found_paths = []
-
- for path in paths_to_check:
- found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- if found:
- found_paths.append(path)
-
-
- # disallow the potentially confusing situation that there are multiple
- # variable files for the same name. For example if both group_vars/all.yml
- # and group_vars/all.yaml
- if len(found_paths) > 1:
- raise errors.AnsibleError("Multiple variable files found. "
- "There should only be one. %s" % ( found_paths, ))
-
- return results
-
-## load variables from yaml files/dirs
-# e.g. host/group_vars
-#
-def _load_vars_from_path(path, results, vault_password=None):
- """
- Robustly access the file at path and load variables, carefully reporting
- errors in a friendly/informative way.
-
- Return the tuple (found, new_results, )
- """
-
- try:
- # in the case of a symbolic link, we want the stat of the link itself,
- # not its target
- pathstat = os.lstat(path)
- except os.error, err:
- # most common case is that nothing exists at that path.
- if err.errno == errno.ENOENT:
- return False, results
- # otherwise this is a condition we should report to the user
- raise errors.AnsibleError(
- "%s is not accessible: %s."
- " Please check its permissions." % ( path, err.strerror))
-
- # symbolic link
- if stat.S_ISLNK(pathstat.st_mode):
- try:
- target = os.path.realpath(path)
- except os.error, err2:
- raise errors.AnsibleError("The symbolic link at %s "
- "is not readable: %s. Please check its permissions."
- % (path, err2.strerror, ))
- # follow symbolic link chains by recursing, so we repeat the same
- # permissions checks above and provide useful errors.
- return _load_vars_from_path(target, results, vault_password)
-
- # directory
- if stat.S_ISDIR(pathstat.st_mode):
-
- # support organizing variables across multiple files in a directory
- return True, _load_vars_from_folder(path, results, vault_password=vault_password)
-
- # regular file
- elif stat.S_ISREG(pathstat.st_mode):
- data = parse_yaml_from_file(path, vault_password=vault_password)
- if data and type(data) != dict:
- raise errors.AnsibleError(
- "%s must be stored as a dictionary/hash" % path)
- elif data is None:
- data = {}
-
- # combine vars overrides by default but can be configured to do a
- # hash merge in settings
- results = combine_vars(results, data)
- return True, results
-
- # something else? could be a fifo, socket, device, etc.
- else:
- raise errors.AnsibleError("Expected a variable file or directory "
- "but found a non-file object at path %s" % (path, ))
-
-def _load_vars_from_folder(folder_path, results, vault_password=None):
- """
- Load all variables within a folder recursively.
- """
-
- # this function and _load_vars_from_path are mutually recursive
-
- try:
- names = os.listdir(folder_path)
- except os.error, err:
- raise errors.AnsibleError(
- "This folder cannot be listed: %s: %s."
- % ( folder_path, err.strerror))
-
- # evaluate files in a stable order rather than whatever order the
- # filesystem lists them.
- names.sort()
-
- # do not parse hidden files or dirs, e.g. .svn/
- paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
- for path in paths:
- _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- return results
-
-def update_hash(hash, key, new_value):
- ''' used to avoid nested .update calls on the parent '''
-
- value = hash.get(key, {})
- value.update(new_value)
- hash[key] = value
-
-def censor_unlogged_data(data):
- '''
- used when the no_log: True attribute is passed to a task to keep data from a callback.
- NOT intended to prevent variable registration, but only things from showing up on
- screen
- '''
- new_data = {}
- for (x,y) in data.iteritems():
- if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
- new_data[x] = y
- new_data['censored'] = 'results hidden due to no_log parameter'
- return new_data
-
-def check_mutually_exclusive_privilege(options, parser):
-
- # privilege escalation command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
- (options.su or options.su_user or options.ask_su_pass) and \
- (options.become or options.become_user or options.become_ask_pass) or \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
- (options.become or options.become_user or options.become_ask_pass):
-
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
- "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
- " are exclusive of each other")
-
-
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/v2/ansible/utils/boolean.py b/lib/ansible/utils/boolean.py
similarity index 100%
rename from v2/ansible/utils/boolean.py
rename to lib/ansible/utils/boolean.py
diff --git a/v2/ansible/utils/color.py b/lib/ansible/utils/color.py
similarity index 100%
rename from v2/ansible/utils/color.py
rename to lib/ansible/utils/color.py
diff --git a/v2/ansible/utils/debug.py b/lib/ansible/utils/debug.py
similarity index 100%
rename from v2/ansible/utils/debug.py
rename to lib/ansible/utils/debug.py
diff --git a/v2/ansible/utils/display.py b/lib/ansible/utils/display.py
similarity index 100%
rename from v2/ansible/utils/display.py
rename to lib/ansible/utils/display.py
diff --git a/v2/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
similarity index 100%
rename from v2/ansible/utils/encrypt.py
rename to lib/ansible/utils/encrypt.py
diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py
index a7d142e5bd4..5e378db79f4 100644
--- a/lib/ansible/utils/hashing.py
+++ b/lib/ansible/utils/hashing.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
+from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
@@ -43,6 +44,8 @@ def secure_hash_s(data, hash_func=sha1):
digest = hash_func()
try:
+ if not isinstance(data, basestring):
+ data = "%s" % data
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
@@ -62,8 +65,8 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError, e:
- raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ except IOError as e:
+ raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
diff --git a/v2/ansible/utils/listify.py b/lib/ansible/utils/listify.py
similarity index 100%
rename from v2/ansible/utils/listify.py
rename to lib/ansible/utils/listify.py
diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py
index ee99af2cb54..632b4a00c2a 100644
--- a/lib/ansible/utils/module_docs.py
+++ b/lib/ansible/utils/module_docs.py
@@ -23,7 +23,7 @@ import ast
import yaml
import traceback
-from ansible import utils
+from ansible.plugins import fragment_loader
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
@@ -66,7 +66,7 @@ def get_docstring(filename, verbose=False):
if fragment_slug != 'doesnotexist':
- fragment_class = utils.plugins.fragment_loader.get(fragment_name)
+ fragment_class = fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
diff --git a/v2/ansible/utils/module_docs_fragments b/lib/ansible/utils/module_docs_fragments
similarity index 100%
rename from v2/ansible/utils/module_docs_fragments
rename to lib/ansible/utils/module_docs_fragments
diff --git a/v2/ansible/utils/path.py b/lib/ansible/utils/path.py
similarity index 100%
rename from v2/ansible/utils/path.py
rename to lib/ansible/utils/path.py
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
index 7bd035c0075..2cff2e5e45c 100644
--- a/lib/ansible/utils/unicode.py
+++ b/lib/ansible/utils/unicode.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from six import string_types, text_type, binary_type, PY3
+
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
@@ -35,6 +37,9 @@ _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
+if PY3:
+ basestring = (str, bytes)
+
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
@@ -89,12 +94,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
- if isinstance(obj, unicode):
+ if isinstance(obj, text_type):
return obj
if encoding in _UTF8_ALIASES:
- return unicode(obj, 'utf-8', errors)
+ return text_type(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
- return unicode(obj, 'latin-1', errors)
+ return text_type(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
@@ -110,19 +115,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
simple = None
if not simple:
try:
- simple = str(obj)
+ simple = text_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
- if isinstance(simple, str):
- return unicode(simple, encoding, errors)
+ if isinstance(simple, binary_type):
+ return text_type(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
- if isinstance(obj_repr, str):
- obj_repr = unicode(obj_repr, encoding, errors)
+ if isinstance(obj_repr, binary_type):
+ obj_repr = text_type(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
@@ -198,19 +203,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
- if isinstance(obj, str):
+ if isinstance(obj, binary_type):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
- return ''
+ return b''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
- simple = str(obj)
+ simple = binary_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
@@ -220,19 +225,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
- simple = ''
- if isinstance(simple, unicode):
+ simple = b''
+ if isinstance(simple, text_type):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
- obj_repr = ''
- if isinstance(obj_repr, unicode):
+ obj_repr = b''
+ if isinstance(obj_repr, text_type):
obj_repr = obj_repr.encode(encoding, errors)
else:
- obj_repr = str(obj_repr)
+ obj_repr = binary_type(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
diff --git a/v2/ansible/utils/vars.py b/lib/ansible/utils/vars.py
similarity index 100%
rename from v2/ansible/utils/vars.py
rename to lib/ansible/utils/vars.py
diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py
index 842688a2c18..5c704afac59 100644
--- a/lib/ansible/utils/vault.py
+++ b/lib/ansible/utils/vault.py
@@ -1,4 +1,6 @@
-# (c) 2014, James Tanner
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -12,574 +14,43 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
-import shlex
-import shutil
-import tempfile
-from io import BytesIO
-from subprocess import call
-from ansible import errors
-from hashlib import sha256
+import subprocess
-# Note: Only used for loading obsolete VaultAES files. All files are written
-# using the newer VaultAES256 which does not require md5
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- md5 = None
-
-from binascii import hexlify
-from binascii import unhexlify
from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.path import is_executable
-try:
- from Crypto.Hash import SHA256, HMAC
- HAS_HASH = True
-except ImportError:
- HAS_HASH = False
+def read_vault_file(vault_password_file):
+ """
+ Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
-
-HEADER='$ANSIBLE_VAULT'
-CIPHER_WHITELIST=['AES', 'AES256']
-
-class VaultLib(object):
-
- def __init__(self, password):
- self.password = password
- self.cipher_name = None
- self.version = '1.1'
-
- def is_encrypted(self, data):
- if data.startswith(HEADER):
- return True
- else:
- return False
-
- def encrypt(self, data):
-
- if self.is_encrypted(data):
- raise errors.AnsibleError("data is already encrypted")
-
- if not self.cipher_name:
- self.cipher_name = "AES256"
- #raise errors.AnsibleError("the cipher must be set before encrypting data")
-
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- """
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
- """
-
- # encrypt sha + data
- enc_data = this_cipher.encrypt(data, self.password)
-
- # add header
- tmp_data = self._add_header(enc_data)
- return tmp_data
-
- def decrypt(self, data):
- if self.password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt data")
-
- if not self.is_encrypted(data):
- raise errors.AnsibleError("data is not encrypted")
-
- # clean out header
- data = self._split_header(data)
-
- # create the cipher object
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- # try to unencrypt data
- data = this_cipher.decrypt(data, self.password)
- if data is None:
- raise errors.AnsibleError("Decryption failed")
-
- return data
-
- def _add_header(self, data):
- # combine header and encrypted data in 80 char columns
-
- #tmpdata = hexlify(data)
- tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]
-
- if not self.cipher_name:
- raise errors.AnsibleError("the cipher must be set before adding a header")
-
- dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"
-
- for l in tmpdata:
- dirty_data += l + '\n'
-
- return dirty_data
-
-
- def _split_header(self, data):
- # used by decrypt
-
- tmpdata = data.split('\n')
- tmpheader = tmpdata[0].strip().split(';')
-
- self.version = str(tmpheader[1].strip())
- self.cipher_name = str(tmpheader[2].strip())
- clean_data = '\n'.join(tmpdata[1:])
-
- """
- # strip out newline, join, unhex
- clean_data = [ x.strip() for x in clean_data ]
- clean_data = unhexlify(''.join(clean_data))
- """
-
- return clean_data
-
- def __enter__(self):
- return self
-
- def __exit__(self, *err):
- pass
-
-class VaultEditor(object):
- # uses helper methods for write_file(self, filename, data)
- # to write a file so that code isn't duplicated for simple
- # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
- # ... "Don't Repeat Yourself", etc.
-
- def __init__(self, cipher_name, password, filename):
- # instantiates a member variable for VaultLib
- self.cipher_name = cipher_name
- self.password = password
- self.filename = filename
-
- def _edit_file_helper(self, existing_data=None, cipher=None):
- # make sure the umask is set to a sane value
- old_umask = os.umask(0o077)
-
- # Create a tempfile
- _, tmp_path = tempfile.mkstemp()
-
- if existing_data:
- self.write_data(existing_data, tmp_path)
-
- # drop the user into an editor on the tmp file
+ if is_executable(this_path):
try:
- call(self._editor_shell_command(tmp_path))
- except OSError, e:
- raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e)))
- tmpdata = self.read_data(tmp_path)
-
- # create new vault
- this_vault = VaultLib(self.password)
- if cipher:
- this_vault.cipher_name = cipher
-
- # encrypt new data and write out to tmp
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, tmp_path)
-
- # shuffle tmp file into place
- self.shuffle_files(tmp_path, self.filename)
-
- # and restore umask
- os.umask(old_umask)
-
- def create_file(self):
- """ create a new encrypted file """
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if os.path.isfile(self.filename):
- raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
-
- # Let the user specify contents and save file
- self._edit_file_helper(cipher=self.cipher_name)
-
- def decrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- if this_vault.is_encrypted(tmpdata):
- dec_data = this_vault.decrypt(tmpdata)
- if dec_data is None:
- raise errors.AnsibleError("Decryption failed")
- else:
- self.write_data(dec_data, self.filename)
- else:
- raise errors.AnsibleError("%s is not encrypted" % self.filename)
-
- def edit_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # let the user edit the data and save
- self._edit_file_helper(existing_data=dec_data)
- ###we want the cipher to default to AES256 (get rid of files
- # encrypted with the AES cipher)
- #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
-
-
- def view_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
- old_umask = os.umask(0o077)
- _, tmp_path = tempfile.mkstemp()
- self.write_data(dec_data, tmp_path)
- os.umask(old_umask)
-
- # drop the user into pager on the tmp file
- call(self._pager_shell_command(tmp_path))
- os.remove(tmp_path)
-
- def encrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- this_vault.cipher_name = self.cipher_name
- if not this_vault.is_encrypted(tmpdata):
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, self.filename)
- else:
- raise errors.AnsibleError("%s is already encrypted" % self.filename)
-
- def rekey_file(self, new_password):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # create new vault
- new_vault = VaultLib(new_password)
-
- # we want to force cipher to the default
- #new_vault.cipher_name = this_vault.cipher_name
-
- # re-encrypt data and re-write file
- enc_data = new_vault.encrypt(dec_data)
- self.write_data(enc_data, self.filename)
-
- def read_data(self, filename):
- f = open(filename, "rb")
- tmpdata = f.read()
- f.close()
- return tmpdata
-
- def write_data(self, data, filename):
- if os.path.isfile(filename):
- os.remove(filename)
- f = open(filename, "wb")
- f.write(data)
- f.close()
-
- def shuffle_files(self, src, dest):
- # overwrite dest with src
- if os.path.isfile(dest):
- os.remove(dest)
- shutil.move(src, dest)
-
- def _editor_shell_command(self, filename):
- EDITOR = os.environ.get('EDITOR','vim')
- editor = shlex.split(EDITOR)
- editor.append(filename)
-
- return editor
-
- def _pager_shell_command(self, filename):
- PAGER = os.environ.get('PAGER','less')
- pager = shlex.split(PAGER)
- pager.append(filename)
-
- return pager
-
-########################################
-# CIPHERS #
-########################################
-
-class VaultAES(object):
-
- # this version has been obsoleted by the VaultAES256 class
- # which uses encrypt-then-mac (fixing order) and also improving the KDF used
- # code remains for upgrade purposes only
- # http://stackoverflow.com/a/16761459
-
- def __init__(self):
- if not md5:
- raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.')
- if not HAS_AES:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
-
- """ Create a key and an initialization vector """
-
- d = d_i = ''
- while len(d) < key_length + iv_length:
- d_i = md5(d_i + password + salt).digest()
- d += d_i
-
- key = d[:key_length]
- iv = d[key_length:key_length+iv_length]
-
- return key, iv
-
- def encrypt(self, data, password, key_length=32):
-
- """ Read plaintext data from in_file and write encrypted to out_file """
-
-
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
-
- in_file = BytesIO(tmp_data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
-
- # Get a block of random data. EL does not have Crypto.Random.new()
- # so os.urandom is used for cross platform purposes
- salt = os.urandom(bs - len('Salted__'))
-
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- out_file.write('Salted__' + salt)
- finished = False
- while not finished:
- chunk = in_file.read(1024 * bs)
- if len(chunk) == 0 or len(chunk) % bs != 0:
- padding_length = (bs - len(chunk) % bs) or bs
- chunk += padding_length * chr(padding_length)
- finished = True
- out_file.write(cipher.encrypt(chunk))
-
- out_file.seek(0)
- enc_data = out_file.read()
- tmp_data = hexlify(enc_data)
-
- return tmp_data
-
-
- def decrypt(self, data, password, key_length=32):
-
- """ Read encrypted data from in_file and write decrypted to out_file """
-
- # http://stackoverflow.com/a/14989032
-
- data = ''.join(data.split('\n'))
- data = unhexlify(data)
-
- in_file = BytesIO(data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
- salt = in_file.read(bs)[len('Salted__'):]
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- next_chunk = ''
- finished = False
-
- while not finished:
- chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
- if len(next_chunk) == 0:
- padding_length = ord(chunk[-1])
- chunk = chunk[:-padding_length]
- finished = True
- out_file.write(chunk)
-
- # reset the stream pointer to the beginning
- out_file.seek(0)
- new_data = out_file.read()
-
- # split out sha and verify decryption
- split_data = new_data.split("\n")
- this_sha = split_data[0]
- this_data = '\n'.join(split_data[1:])
- test_sha = sha256(this_data).hexdigest()
-
- if this_sha != test_sha:
- raise errors.AnsibleError("Decryption failed")
-
- #return out_file.read()
- return this_data
-
-
-class VaultAES256(object):
-
- """
- Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
- Keys are derived using PBKDF2
- """
-
- # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
-
- def __init__(self):
-
- if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def gen_key_initctr(self, password, salt):
- # 16 for AES 128, 32 for AES256
- keylength = 32
-
- # match the size used for counter.new to avoid extra work
- ivlength = 16
-
- hash_function = SHA256
-
- # make two keys and one iv
- pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
-
-
- derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
- count=10000, prf=pbkdf2_prf)
-
- key1 = derivedkey[:keylength]
- key2 = derivedkey[keylength:(keylength * 2)]
- iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
-
- return key1, key2, hexlify(iv)
-
-
- def encrypt(self, data, password):
-
- salt = os.urandom(32)
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
- bs = AES.block_size
- padding_length = (bs - len(data) % bs) or bs
- data += padding_length * chr(padding_length)
-
- # COUNTER.new PARAMETERS
- # 1) nbits (integer) - Length of the counter, in bits.
- # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
-
- ctr = Counter.new(128, initial_value=long(iv, 16))
-
- # AES.new PARAMETERS
- # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
- # 2) MODE_CTR, is the recommended mode
- # 3) counter=
-
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # ENCRYPT PADDED DATA
- cryptedData = cipher.encrypt(data)
-
- # COMBINE SALT, DIGEST AND DATA
- hmac = HMAC.new(key2, cryptedData, SHA256)
- message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
- message = hexlify(message)
- return message
-
- def decrypt(self, data, password):
-
- # SPLIT SALT, DIGEST, AND DATA
- data = ''.join(data.split("\n"))
- data = unhexlify(data)
- salt, cryptedHmac, cryptedData = data.split("\n", 2)
- salt = unhexlify(salt)
- cryptedData = unhexlify(cryptedData)
-
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # EXIT EARLY IF DIGEST DOESN'T MATCH
- hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
- if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
- return None
-
- # SET THE COUNTER AND THE CIPHER
- ctr = Counter.new(128, initial_value=long(iv, 16))
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # DECRYPT PADDED DATA
- decryptedData = cipher.decrypt(cryptedData)
-
- # UNPAD DATA
- padding_length = ord(decryptedData[-1])
- decryptedData = decryptedData[:-padding_length]
-
- return decryptedData
-
- def is_equal(self, a, b):
- # http://codahale.com/a-lesson-in-timing-attacks/
- if len(a) != len(b):
- return False
-
- result = 0
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
-
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
+
+ return vault_pass
diff --git a/v2/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
similarity index 100%
rename from v2/ansible/vars/__init__.py
rename to lib/ansible/vars/__init__.py
diff --git a/v2/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
similarity index 100%
rename from v2/ansible/vars/hostvars.py
rename to lib/ansible/vars/hostvars.py
diff --git a/v2/samples/README.md b/samples/README.md
similarity index 100%
rename from v2/samples/README.md
rename to samples/README.md
diff --git a/v2/samples/common_include.yml b/samples/common_include.yml
similarity index 100%
rename from v2/samples/common_include.yml
rename to samples/common_include.yml
diff --git a/v2/samples/hosts b/samples/hosts
similarity index 100%
rename from v2/samples/hosts
rename to samples/hosts
diff --git a/v2/samples/ignore_errors.yml b/samples/ignore_errors.yml
similarity index 100%
rename from v2/samples/ignore_errors.yml
rename to samples/ignore_errors.yml
diff --git a/v2/samples/include.yml b/samples/include.yml
similarity index 100%
rename from v2/samples/include.yml
rename to samples/include.yml
diff --git a/v2/samples/inv_lg b/samples/inv_lg
similarity index 100%
rename from v2/samples/inv_lg
rename to samples/inv_lg
diff --git a/v2/samples/inv_md b/samples/inv_md
similarity index 100%
rename from v2/samples/inv_md
rename to samples/inv_md
diff --git a/v2/samples/inv_sm b/samples/inv_sm
similarity index 100%
rename from v2/samples/inv_sm
rename to samples/inv_sm
diff --git a/v2/samples/l1_include.yml b/samples/l1_include.yml
similarity index 100%
rename from v2/samples/l1_include.yml
rename to samples/l1_include.yml
diff --git a/v2/samples/l2_include.yml b/samples/l2_include.yml
similarity index 100%
rename from v2/samples/l2_include.yml
rename to samples/l2_include.yml
diff --git a/v2/samples/l3_include.yml b/samples/l3_include.yml
similarity index 100%
rename from v2/samples/l3_include.yml
rename to samples/l3_include.yml
diff --git a/v2/samples/localhost_include.yml b/samples/localhost_include.yml
similarity index 100%
rename from v2/samples/localhost_include.yml
rename to samples/localhost_include.yml
diff --git a/v2/samples/localhosts b/samples/localhosts
similarity index 100%
rename from v2/samples/localhosts
rename to samples/localhosts
diff --git a/v2/samples/lookup_file.yml b/samples/lookup_file.yml
similarity index 100%
rename from v2/samples/lookup_file.yml
rename to samples/lookup_file.yml
diff --git a/v2/samples/lookup_password.yml b/samples/lookup_password.yml
similarity index 100%
rename from v2/samples/lookup_password.yml
rename to samples/lookup_password.yml
diff --git a/v2/samples/lookup_pipe.py b/samples/lookup_pipe.py
similarity index 100%
rename from v2/samples/lookup_pipe.py
rename to samples/lookup_pipe.py
diff --git a/v2/samples/lookup_template.yml b/samples/lookup_template.yml
similarity index 100%
rename from v2/samples/lookup_template.yml
rename to samples/lookup_template.yml
diff --git a/v2/samples/multi.py b/samples/multi.py
similarity index 100%
rename from v2/samples/multi.py
rename to samples/multi.py
diff --git a/v2/samples/multi_queues.py b/samples/multi_queues.py
similarity index 100%
rename from v2/samples/multi_queues.py
rename to samples/multi_queues.py
diff --git a/v2/samples/roles/common/meta/main.yml b/samples/roles/common/meta/main.yml
similarity index 100%
rename from v2/samples/roles/common/meta/main.yml
rename to samples/roles/common/meta/main.yml
diff --git a/v2/samples/roles/common/tasks/main.yml b/samples/roles/common/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/common/tasks/main.yml
rename to samples/roles/common/tasks/main.yml
diff --git a/v2/samples/roles/role_a/meta/main.yml b/samples/roles/role_a/meta/main.yml
similarity index 100%
rename from v2/samples/roles/role_a/meta/main.yml
rename to samples/roles/role_a/meta/main.yml
diff --git a/v2/samples/roles/role_a/tasks/main.yml b/samples/roles/role_a/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/role_a/tasks/main.yml
rename to samples/roles/role_a/tasks/main.yml
diff --git a/v2/samples/roles/role_b/meta/main.yml b/samples/roles/role_b/meta/main.yml
similarity index 100%
rename from v2/samples/roles/role_b/meta/main.yml
rename to samples/roles/role_b/meta/main.yml
diff --git a/v2/samples/roles/role_b/tasks/main.yml b/samples/roles/role_b/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/role_b/tasks/main.yml
rename to samples/roles/role_b/tasks/main.yml
diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/samples/roles/test_become_r1/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r1/meta/main.yml
rename to samples/roles/test_become_r1/meta/main.yml
diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/samples/roles/test_become_r1/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r1/tasks/main.yml
rename to samples/roles/test_become_r1/tasks/main.yml
diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/samples/roles/test_become_r2/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r2/meta/main.yml
rename to samples/roles/test_become_r2/meta/main.yml
diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/samples/roles/test_become_r2/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_become_r2/tasks/main.yml
rename to samples/roles/test_become_r2/tasks/main.yml
diff --git a/v2/samples/roles/test_role/meta/main.yml b/samples/roles/test_role/meta/main.yml
similarity index 100%
rename from v2/samples/roles/test_role/meta/main.yml
rename to samples/roles/test_role/meta/main.yml
diff --git a/v2/samples/roles/test_role/tasks/main.yml b/samples/roles/test_role/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_role/tasks/main.yml
rename to samples/roles/test_role/tasks/main.yml
diff --git a/v2/samples/roles/test_role_dep/tasks/main.yml b/samples/roles/test_role_dep/tasks/main.yml
similarity index 100%
rename from v2/samples/roles/test_role_dep/tasks/main.yml
rename to samples/roles/test_role_dep/tasks/main.yml
diff --git a/v2/samples/src b/samples/src
similarity index 100%
rename from v2/samples/src
rename to samples/src
diff --git a/v2/samples/template.j2 b/samples/template.j2
similarity index 100%
rename from v2/samples/template.j2
rename to samples/template.j2
diff --git a/v2/samples/test_become.yml b/samples/test_become.yml
similarity index 100%
rename from v2/samples/test_become.yml
rename to samples/test_become.yml
diff --git a/v2/samples/test_big_debug.yml b/samples/test_big_debug.yml
similarity index 100%
rename from v2/samples/test_big_debug.yml
rename to samples/test_big_debug.yml
diff --git a/v2/samples/test_big_ping.yml b/samples/test_big_ping.yml
similarity index 100%
rename from v2/samples/test_big_ping.yml
rename to samples/test_big_ping.yml
diff --git a/v2/samples/test_block.yml b/samples/test_block.yml
similarity index 100%
rename from v2/samples/test_block.yml
rename to samples/test_block.yml
diff --git a/v2/samples/test_blocks_of_blocks.yml b/samples/test_blocks_of_blocks.yml
similarity index 100%
rename from v2/samples/test_blocks_of_blocks.yml
rename to samples/test_blocks_of_blocks.yml
diff --git a/v2/samples/test_fact_gather.yml b/samples/test_fact_gather.yml
similarity index 100%
rename from v2/samples/test_fact_gather.yml
rename to samples/test_fact_gather.yml
diff --git a/v2/samples/test_free.yml b/samples/test_free.yml
similarity index 100%
rename from v2/samples/test_free.yml
rename to samples/test_free.yml
diff --git a/v2/samples/test_include.yml b/samples/test_include.yml
similarity index 100%
rename from v2/samples/test_include.yml
rename to samples/test_include.yml
diff --git a/v2/samples/test_pb.yml b/samples/test_pb.yml
similarity index 100%
rename from v2/samples/test_pb.yml
rename to samples/test_pb.yml
diff --git a/v2/samples/test_role.yml b/samples/test_role.yml
similarity index 100%
rename from v2/samples/test_role.yml
rename to samples/test_role.yml
diff --git a/v2/samples/test_roles_complex.yml b/samples/test_roles_complex.yml
similarity index 100%
rename from v2/samples/test_roles_complex.yml
rename to samples/test_roles_complex.yml
diff --git a/v2/samples/test_run_once.yml b/samples/test_run_once.yml
similarity index 100%
rename from v2/samples/test_run_once.yml
rename to samples/test_run_once.yml
diff --git a/v2/samples/test_sudo.yml b/samples/test_sudo.yml
similarity index 100%
rename from v2/samples/test_sudo.yml
rename to samples/test_sudo.yml
diff --git a/v2/samples/test_tags.yml b/samples/test_tags.yml
similarity index 100%
rename from v2/samples/test_tags.yml
rename to samples/test_tags.yml
diff --git a/v2/samples/testing/extra_vars.yml b/samples/testing/extra_vars.yml
similarity index 100%
rename from v2/samples/testing/extra_vars.yml
rename to samples/testing/extra_vars.yml
diff --git a/v2/samples/testing/frag1 b/samples/testing/frag1
similarity index 100%
rename from v2/samples/testing/frag1
rename to samples/testing/frag1
diff --git a/v2/samples/testing/frag2 b/samples/testing/frag2
similarity index 100%
rename from v2/samples/testing/frag2
rename to samples/testing/frag2
diff --git a/v2/samples/testing/frag3 b/samples/testing/frag3
similarity index 100%
rename from v2/samples/testing/frag3
rename to samples/testing/frag3
diff --git a/v2/samples/testing/vars.yml b/samples/testing/vars.yml
similarity index 100%
rename from v2/samples/testing/vars.yml
rename to samples/testing/vars.yml
diff --git a/v2/samples/with_dict.yml b/samples/with_dict.yml
similarity index 100%
rename from v2/samples/with_dict.yml
rename to samples/with_dict.yml
diff --git a/v2/samples/with_env.yml b/samples/with_env.yml
similarity index 100%
rename from v2/samples/with_env.yml
rename to samples/with_env.yml
diff --git a/v2/samples/with_fileglob.yml b/samples/with_fileglob.yml
similarity index 100%
rename from v2/samples/with_fileglob.yml
rename to samples/with_fileglob.yml
diff --git a/v2/samples/with_first_found.yml b/samples/with_first_found.yml
similarity index 100%
rename from v2/samples/with_first_found.yml
rename to samples/with_first_found.yml
diff --git a/v2/samples/with_flattened.yml b/samples/with_flattened.yml
similarity index 100%
rename from v2/samples/with_flattened.yml
rename to samples/with_flattened.yml
diff --git a/v2/samples/with_indexed_items.yml b/samples/with_indexed_items.yml
similarity index 100%
rename from v2/samples/with_indexed_items.yml
rename to samples/with_indexed_items.yml
diff --git a/v2/samples/with_items.yml b/samples/with_items.yml
similarity index 100%
rename from v2/samples/with_items.yml
rename to samples/with_items.yml
diff --git a/v2/samples/with_lines.yml b/samples/with_lines.yml
similarity index 100%
rename from v2/samples/with_lines.yml
rename to samples/with_lines.yml
diff --git a/v2/samples/with_nested.yml b/samples/with_nested.yml
similarity index 100%
rename from v2/samples/with_nested.yml
rename to samples/with_nested.yml
diff --git a/v2/samples/with_random_choice.yml b/samples/with_random_choice.yml
similarity index 100%
rename from v2/samples/with_random_choice.yml
rename to samples/with_random_choice.yml
diff --git a/v2/samples/with_sequence.yml b/samples/with_sequence.yml
similarity index 100%
rename from v2/samples/with_sequence.yml
rename to samples/with_sequence.yml
diff --git a/v2/samples/with_subelements.yml b/samples/with_subelements.yml
similarity index 100%
rename from v2/samples/with_subelements.yml
rename to samples/with_subelements.yml
diff --git a/v2/samples/with_together.yml b/samples/with_together.yml
similarity index 100%
rename from v2/samples/with_together.yml
rename to samples/with_together.yml
diff --git a/v2/test/__init__.py b/test/units/__init__.py
similarity index 100%
rename from v2/test/__init__.py
rename to test/units/__init__.py
diff --git a/v2/test/errors/__init__.py b/test/units/errors/__init__.py
similarity index 100%
rename from v2/test/errors/__init__.py
rename to test/units/errors/__init__.py
diff --git a/v2/test/errors/test_errors.py b/test/units/errors/test_errors.py
similarity index 100%
rename from v2/test/errors/test_errors.py
rename to test/units/errors/test_errors.py
diff --git a/v2/test/executor/__init__.py b/test/units/executor/__init__.py
similarity index 100%
rename from v2/test/executor/__init__.py
rename to test/units/executor/__init__.py
diff --git a/v2/test/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py
similarity index 100%
rename from v2/test/executor/test_play_iterator.py
rename to test/units/executor/test_play_iterator.py
diff --git a/v2/ansible/modules/__init__.py b/test/units/mock/__init__.py
similarity index 100%
rename from v2/ansible/modules/__init__.py
rename to test/units/mock/__init__.py
diff --git a/v2/test/mock/loader.py b/test/units/mock/loader.py
similarity index 100%
rename from v2/test/mock/loader.py
rename to test/units/mock/loader.py
diff --git a/v2/test/parsing/__init__.py b/test/units/parsing/__init__.py
similarity index 100%
rename from v2/test/parsing/__init__.py
rename to test/units/parsing/__init__.py
diff --git a/v2/test/parsing/test_data_loader.py b/test/units/parsing/test_data_loader.py
similarity index 100%
rename from v2/test/parsing/test_data_loader.py
rename to test/units/parsing/test_data_loader.py
diff --git a/v2/test/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py
similarity index 100%
rename from v2/test/parsing/test_mod_args.py
rename to test/units/parsing/test_mod_args.py
diff --git a/v2/test/parsing/test_splitter.py b/test/units/parsing/test_splitter.py
similarity index 100%
rename from v2/test/parsing/test_splitter.py
rename to test/units/parsing/test_splitter.py
diff --git a/v2/test/parsing/vault/__init__.py b/test/units/parsing/vault/__init__.py
similarity index 100%
rename from v2/test/parsing/vault/__init__.py
rename to test/units/parsing/vault/__init__.py
diff --git a/v2/test/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py
similarity index 100%
rename from v2/test/parsing/vault/test_vault.py
rename to test/units/parsing/vault/test_vault.py
diff --git a/v2/test/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
similarity index 100%
rename from v2/test/parsing/vault/test_vault_editor.py
rename to test/units/parsing/vault/test_vault_editor.py
diff --git a/lib/ansible/callback_plugins/__init__.py b/test/units/parsing/yaml/__init__.py
similarity index 100%
rename from lib/ansible/callback_plugins/__init__.py
rename to test/units/parsing/yaml/__init__.py
diff --git a/v2/test/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py
similarity index 100%
rename from v2/test/parsing/yaml/test_loader.py
rename to test/units/parsing/yaml/test_loader.py
diff --git a/v2/test/playbook/__init__.py b/test/units/playbook/__init__.py
similarity index 100%
rename from v2/test/playbook/__init__.py
rename to test/units/playbook/__init__.py
diff --git a/v2/test/playbook/test_block.py b/test/units/playbook/test_block.py
similarity index 100%
rename from v2/test/playbook/test_block.py
rename to test/units/playbook/test_block.py
diff --git a/v2/test/playbook/test_play.py b/test/units/playbook/test_play.py
similarity index 100%
rename from v2/test/playbook/test_play.py
rename to test/units/playbook/test_play.py
diff --git a/v2/test/playbook/test_playbook.py b/test/units/playbook/test_playbook.py
similarity index 100%
rename from v2/test/playbook/test_playbook.py
rename to test/units/playbook/test_playbook.py
diff --git a/v2/test/playbook/test_role.py b/test/units/playbook/test_role.py
similarity index 100%
rename from v2/test/playbook/test_role.py
rename to test/units/playbook/test_role.py
diff --git a/v2/test/playbook/test_task.py b/test/units/playbook/test_task.py
similarity index 100%
rename from v2/test/playbook/test_task.py
rename to test/units/playbook/test_task.py
diff --git a/v2/test/plugins/__init__.py b/test/units/plugins/__init__.py
similarity index 100%
rename from v2/test/plugins/__init__.py
rename to test/units/plugins/__init__.py
diff --git a/v2/test/plugins/test_cache.py b/test/units/plugins/test_cache.py
similarity index 100%
rename from v2/test/plugins/test_cache.py
rename to test/units/plugins/test_cache.py
diff --git a/v2/test/plugins/test_connection.py b/test/units/plugins/test_connection.py
similarity index 100%
rename from v2/test/plugins/test_connection.py
rename to test/units/plugins/test_connection.py
diff --git a/v2/test/plugins/test_plugins.py b/test/units/plugins/test_plugins.py
similarity index 100%
rename from v2/test/plugins/test_plugins.py
rename to test/units/plugins/test_plugins.py
diff --git a/v2/test/vars/__init__.py b/test/units/vars/__init__.py
similarity index 100%
rename from v2/test/vars/__init__.py
rename to test/units/vars/__init__.py
diff --git a/v2/test/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py
similarity index 100%
rename from v2/test/vars/test_variable_manager.py
rename to test/units/vars/test_variable_manager.py
diff --git a/v2/ansible/utils/__init__.py b/v1/ansible/__init__.py
similarity index 85%
rename from v2/ansible/utils/__init__.py
rename to v1/ansible/__init__.py
index ae8ccff5952..ba5ca83b723 100644
--- a/v2/ansible/utils/__init__.py
+++ b/v1/ansible/__init__.py
@@ -14,7 +14,5 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+__version__ = '2.0.0'
+__author__ = 'Michael DeHaan'
diff --git a/lib/ansible/cache/__init__.py b/v1/ansible/cache/__init__.py
similarity index 100%
rename from lib/ansible/cache/__init__.py
rename to v1/ansible/cache/__init__.py
diff --git a/lib/ansible/cache/base.py b/v1/ansible/cache/base.py
similarity index 100%
rename from lib/ansible/cache/base.py
rename to v1/ansible/cache/base.py
diff --git a/lib/ansible/cache/jsonfile.py b/v1/ansible/cache/jsonfile.py
similarity index 100%
rename from lib/ansible/cache/jsonfile.py
rename to v1/ansible/cache/jsonfile.py
diff --git a/lib/ansible/cache/memcached.py b/v1/ansible/cache/memcached.py
similarity index 100%
rename from lib/ansible/cache/memcached.py
rename to v1/ansible/cache/memcached.py
diff --git a/lib/ansible/cache/memory.py b/v1/ansible/cache/memory.py
similarity index 100%
rename from lib/ansible/cache/memory.py
rename to v1/ansible/cache/memory.py
diff --git a/lib/ansible/cache/redis.py b/v1/ansible/cache/redis.py
similarity index 100%
rename from lib/ansible/cache/redis.py
rename to v1/ansible/cache/redis.py
diff --git a/lib/ansible/runner/action_plugins/__init__.py b/v1/ansible/callback_plugins/__init__.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/__init__.py
rename to v1/ansible/callback_plugins/__init__.py
diff --git a/lib/ansible/callback_plugins/noop.py b/v1/ansible/callback_plugins/noop.py
similarity index 100%
rename from lib/ansible/callback_plugins/noop.py
rename to v1/ansible/callback_plugins/noop.py
diff --git a/lib/ansible/callbacks.py b/v1/ansible/callbacks.py
similarity index 100%
rename from lib/ansible/callbacks.py
rename to v1/ansible/callbacks.py
diff --git a/lib/ansible/color.py b/v1/ansible/color.py
similarity index 100%
rename from lib/ansible/color.py
rename to v1/ansible/color.py
diff --git a/v2/ansible/constants.py b/v1/ansible/constants.py
similarity index 89%
rename from v2/ansible/constants.py
rename to v1/ansible/constants.py
index 456beb8bbc4..089de5b7c5b 100644
--- a/v2/ansible/constants.py
+++ b/v1/ansible/constants.py
@@ -15,15 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import os
import pwd
import sys
-
-from six.moves import configparser
+import ConfigParser
from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :)
@@ -40,15 +35,13 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
- value = mk_boolean(value)
- if value:
- if integer:
- value = int(value)
- elif floating:
- value = float(value)
- elif islist:
- if isinstance(value, basestring):
- value = [x.strip() for x in value.split(',')]
+ return mk_boolean(value)
+ if value and integer:
+ return int(value)
+ if value and floating:
+ return float(value)
+ if value and islist:
+ return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
@@ -67,7 +60,7 @@ def _get_config(p, section, key, env_var, default):
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
- p = configparser.ConfigParser()
+ p = ConfigParser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
@@ -80,8 +73,8 @@ def load_config_file():
if path is not None and os.path.exists(path):
try:
p.read(path)
- except configparser.Error as e:
- print("Error reading config file: \n{0}".format(e))
+ except ConfigParser.Error as e:
+ print "Error reading config file: \n%s" % e
sys.exit(1)
return p
return None
@@ -105,8 +98,7 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
DEFAULTS='defaults'
# configurable things
-DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
+DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@@ -120,7 +112,6 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@@ -131,6 +122,7 @@ DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None,
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@@ -149,7 +141,7 @@ BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2
DEFAULT_BECOME_EXE = None
@@ -164,7 +156,6 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
-DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
@@ -182,8 +173,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
-RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
+
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@@ -205,16 +196,10 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
-# galaxy related
-DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
-# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
-GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
-
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
-MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
diff --git a/lib/ansible/errors.py b/v1/ansible/errors.py
similarity index 100%
rename from lib/ansible/errors.py
rename to v1/ansible/errors.py
diff --git a/v2/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py
similarity index 88%
rename from v2/ansible/inventory/__init__.py
rename to v1/ansible/inventory/__init__.py
index 063398f17f9..2048046d3c1 100644
--- a/v2/ansible/inventory/__init__.py
+++ b/v1/ansible/inventory/__init__.py
@@ -16,44 +16,36 @@
# along with Ansible. If not, see .
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import fnmatch
import os
import sys
import re
-import stat
import subprocess
-from ansible import constants as C
-from ansible.errors import *
-
+import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible.plugins import vars_loader
-from ansible.utils.path import is_executable
-from ansible.utils.vars import combine_vars
+from ansible import errors
+from ansible import utils
class Inventory(object):
"""
Host inventory for ansible.
"""
- #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+ __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+ 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+ '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
- def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
+ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
- self._loader = loader
- self._variable_manager = variable_manager
+ self._vault_password=vault_password
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@@ -105,7 +97,7 @@ class Inventory(object):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
+ self.parser = InventoryDirectory(filename=host_list)
self.groups = self.parser.groups.values()
else:
# check to see if the specified file starts with a
@@ -121,9 +113,9 @@ class Inventory(object):
except:
pass
- if is_executable(host_list):
+ if utils.is_executable(host_list):
try:
- self.parser = InventoryScript(loader=self._loader, filename=host_list)
+ self.parser = InventoryScript(filename=host_list)
self.groups = self.parser.groups.values()
except:
if not shebang_present:
@@ -142,23 +134,19 @@ class Inventory(object):
else:
raise
- vars_loader.add_directory(self.basedir(), with_subdir=True)
+ utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
- self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
- # FIXME: shouldn't be required, since the group/host vars file
- # management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
- # FIXME: combine_vars
- group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
+ group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
- # FIXME: combine_vars
- host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
+ host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
def _match(self, str, pattern_str):
@@ -204,9 +192,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
- hosts = [ h for h in hosts if h in self._restriction ]
+ hosts = [ h for h in hosts if h.name in self._restriction ]
if self._also_restriction is not None:
- hosts = [ h for h in hosts if h in self._also_restriction ]
+ hosts = [ h for h in hosts if h.name in self._also_restriction ]
return hosts
@@ -332,8 +320,6 @@ class Inventory(object):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
- new_host.ipv4_address = '127.0.0.1'
-
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
@@ -434,7 +420,7 @@ class Inventory(object):
group = self.get_group(groupname)
if group is None:
- raise Exception("group not found: %s" % groupname)
+ raise errors.AnsibleError("group not found: %s" % groupname)
vars = {}
@@ -442,21 +428,19 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# Read group_vars/ files
- # FIXME: combine_vars
- vars = combine_vars(vars, self.get_group_vars(group))
+ vars = utils.combine_vars(vars, self.get_group_vars(group))
return vars
- def get_vars(self, hostname, update_cached=False, vault_password=None):
+ def get_variables(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
- raise Exception("host not found: %s" % hostname)
- return host.get_vars()
+ raise errors.AnsibleError("host not found: %s" % hostname)
+ return host.get_variables()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -476,26 +460,22 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, updated)
+ vars = utils.combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
- # FIXME: combine_vars
- vars = combine_vars(vars, self.parser.get_host_variables(host))
+ vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
- # FIXME: combine_vars
- vars = combine_vars(vars, self.get_host_vars(host))
+ vars = utils.combine_vars(vars, self.get_host_vars(host))
return vars
@@ -510,7 +490,7 @@ class Inventory(object):
""" return a list of hostnames for a pattern """
- result = [ h for h in self.get_hosts(pattern) ]
+ result = [ h.name for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
result = [pattern]
return result
@@ -518,7 +498,11 @@ class Inventory(object):
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
- def restrict_to_hosts(self, restriction):
+ # TODO: remove this function
+ def get_restriction(self):
+ return self._restriction
+
+ def restrict_to(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
@@ -560,7 +544,7 @@ class Inventory(object):
results.append(x)
self._subset = results
- def remove_restriction(self):
+ def lift_restriction(self):
""" Do not restrict list operations """
self._restriction = None
@@ -604,12 +588,10 @@ class Inventory(object):
self._playbook_basedir = dir
# get group vars from group_vars/ files
for group in self.groups:
- # FIXME: combine_vars
- group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
# get host vars from host_vars/ files
for host in self.get_hosts():
- # FIXME: combine_vars
- host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
@@ -657,15 +639,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
- # FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- self._variable_manager.add_group_vars_file(base_path, self._loader)
+ results = utils.load_vars(base_path, results, vault_password=self._vault_password)
+
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- self._variable_manager.add_host_vars_file(base_path, self._loader)
+ results = utils.load_vars(base_path, results, vault_password=self._vault_password)
# all done, results is a dictionary of variables for this particular host.
return results
diff --git a/v2/ansible/inventory/dir.py b/v1/ansible/inventory/dir.py
similarity index 91%
rename from v2/ansible/inventory/dir.py
rename to v1/ansible/inventory/dir.py
index 735f32d62c3..9ac23fff899 100644
--- a/v2/ansible/inventory/dir.py
+++ b/v1/ansible/inventory/dir.py
@@ -17,25 +17,20 @@
# along with Ansible. If not, see .
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
import os
-
-from ansible import constants as C
-from ansible.errors import AnsibleError
-
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
-from ansible.utils.path import is_executable
-from ansible.utils.vars import combine_vars
+from ansible import utils
+from ansible import errors
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
@@ -43,12 +38,10 @@ class InventoryDirectory(object):
self.hosts = {}
self.groups = {}
- self._loader = loader
-
for i in self.names:
# Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
+ if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
@@ -58,9 +51,9 @@ class InventoryDirectory(object):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
- parser = InventoryDirectory(loader=loader, filename=fullpath)
- elif is_executable(fullpath):
- parser = InventoryScript(loader=loader, filename=fullpath)
+ parser = InventoryDirectory(filename=fullpath)
+ elif utils.is_executable(fullpath):
+ parser = InventoryScript(filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
@@ -160,7 +153,7 @@ class InventoryDirectory(object):
# name
if group.name != newgroup.name:
- raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+ raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
@@ -203,14 +196,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group)
# variables
- group.vars = combine_vars(group.vars, newgroup.vars)
+ group.vars = utils.combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
- raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+ raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
@@ -225,7 +218,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host)
# variables
- host.vars = combine_vars(host.vars, newhost.vars)
+ host.vars = utils.combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
diff --git a/v2/ansible/inventory/expand_hosts.py b/v1/ansible/inventory/expand_hosts.py
similarity index 97%
rename from v2/ansible/inventory/expand_hosts.py
rename to v1/ansible/inventory/expand_hosts.py
index b5a957c53fe..f1297409355 100644
--- a/v2/ansible/inventory/expand_hosts.py
+++ b/v1/ansible/inventory/expand_hosts.py
@@ -30,9 +30,6 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import string
from ansible import errors
diff --git a/v2/ansible/inventory/group.py b/v1/ansible/inventory/group.py
similarity index 69%
rename from v2/ansible/inventory/group.py
rename to v1/ansible/inventory/group.py
index 6525e69b466..262558e69c8 100644
--- a/v2/ansible/inventory/group.py
+++ b/v1/ansible/inventory/group.py
@@ -14,15 +14,11 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-from ansible.utils.debug import debug
-
-class Group:
+class Group(object):
''' a group of ansible hosts '''
- #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+ __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
@@ -33,49 +29,9 @@ class Group:
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
-
#self.clear_hosts_cache()
- #if self.name is None:
- # raise Exception("group name is required")
-
- def __repr__(self):
- return self.get_name()
-
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
- def serialize(self):
- parent_groups = []
- for parent in self.parent_groups:
- parent_groups.append(parent.serialize())
-
- result = dict(
- name=self.name,
- vars=self.vars.copy(),
- parent_groups=parent_groups,
- depth=self.depth,
- )
-
- debug("serializing group, result is: %s" % result)
- return result
-
- def deserialize(self, data):
- debug("deserializing group, data is: %s" % data)
- self.__init__()
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
-
- parent_groups = data.get('parent_groups', [])
- for parent_data in parent_groups:
- g = Group()
- g.deserialize(parent_data)
- self.parent_groups.append(g)
-
- def get_name(self):
- return self.name
+ if self.name is None:
+ raise Exception("group name is required")
def add_child_group(self, group):
@@ -144,7 +100,7 @@ class Group:
hosts.append(mine)
return hosts
- def get_vars(self):
+ def get_variables(self):
return self.vars.copy()
def _get_ancestors(self):
diff --git a/v1/ansible/inventory/host.py b/v1/ansible/inventory/host.py
new file mode 100644
index 00000000000..d4dc20fa462
--- /dev/null
+++ b/v1/ansible/inventory/host.py
@@ -0,0 +1,67 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+import ansible.constants as C
+from ansible import utils
+
+class Host(object):
+ ''' a single ansible host '''
+
+ __slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __init__(self, name=None, port=None):
+
+ self.name = name
+ self.vars = {}
+ self.groups = []
+ if port and port != C.DEFAULT_REMOTE_PORT:
+ self.set_variable('ansible_ssh_port', int(port))
+
+ if self.name is None:
+ raise Exception("host name is required")
+
+ def add_group(self, group):
+
+ self.groups.append(group)
+
+ def set_variable(self, key, value):
+
+ self.vars[key]=value
+
+ def get_groups(self):
+
+ groups = {}
+ for g in self.groups:
+ groups[g.name] = g
+ ancestors = g.get_ancestors()
+ for a in ancestors:
+ groups[a.name] = a
+ return groups.values()
+
+ def get_variables(self):
+
+ results = {}
+ groups = self.get_groups()
+ for group in sorted(groups, key=lambda g: g.depth):
+ results = utils.combine_vars(results, group.get_variables())
+ results = utils.combine_vars(results, self.vars)
+ results['inventory_hostname'] = self.name
+ results['inventory_hostname_short'] = self.name.split('.')[0]
+ results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
+ return results
+
+
diff --git a/v2/ansible/inventory/ini.py b/v1/ansible/inventory/ini.py
similarity index 82%
rename from v2/ansible/inventory/ini.py
rename to v1/ansible/inventory/ini.py
index e004ee8bb75..bd9a98e7f86 100644
--- a/v2/ansible/inventory/ini.py
+++ b/v1/ansible/inventory/ini.py
@@ -16,20 +16,17 @@
# along with Ansible. If not, see .
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-import ast
-import shlex
-import re
-
-from ansible import constants as C
-from ansible.errors import *
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible.utils.unicode import to_unicode
+from ansible import errors
+from ansible import utils
+import shlex
+import re
+import ast
class InventoryParser(object):
"""
@@ -37,8 +34,9 @@ class InventoryParser(object):
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
- self.filename = filename
+
with open(filename) as fh:
+ self.filename = filename
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
@@ -56,7 +54,10 @@ class InventoryParser(object):
def _parse_value(v):
if "#" not in v:
try:
- v = ast.literal_eval(v)
+ ret = ast.literal_eval(v)
+ if not isinstance(ret, float):
+ # Do not trim floats. Eg: "1.20" to 1.2
+ return ret
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it.
except ValueError:
@@ -65,7 +66,7 @@ class InventoryParser(object):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return to_unicode(v, nonstring='passthru', errors='strict')
+ return v
# [webservers]
# alpha
@@ -90,8 +91,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
- for line in self.lines:
- line = self._before_comment(line).strip()
+ for lineno in range(len(self.lines)):
+ line = utils.before_comment(self.lines[lineno]).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
@@ -145,11 +146,8 @@ class InventoryParser(object):
try:
(k,v) = t.split("=", 1)
except ValueError, e:
- raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
- if k == 'ansible_ssh_host':
- host.ipv4_address = self._parse_value(v)
- else:
- host.set_variable(k, self._parse_value(v))
+ raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
+ host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
@@ -159,8 +157,8 @@ class InventoryParser(object):
def _parse_group_children(self):
group = None
- for line in self.lines:
- line = line.strip()
+ for lineno in range(len(self.lines)):
+ line = self.lines[lineno].strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
@@ -175,7 +173,7 @@ class InventoryParser(object):
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
- raise AnsibleError("child group is not defined: (%s)" % line)
+ raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
else:
group.add_child_group(kid_group)
@@ -186,13 +184,13 @@ class InventoryParser(object):
def _parse_group_variables(self):
group = None
- for line in self.lines:
- line = line.strip()
+ for lineno in range(len(self.lines)):
+ line = self.lines[lineno].strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
- raise AnsibleError("can't add vars to undefined group: %s" % line)
+ raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
@@ -201,18 +199,10 @@ class InventoryParser(object):
pass
elif group:
if "=" not in line:
- raise AnsibleError("variables assigned to group must be in key=value form")
+ raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
-
- def _before_comment(self, msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
diff --git a/v2/ansible/inventory/script.py b/v1/ansible/inventory/script.py
similarity index 82%
rename from v2/ansible/inventory/script.py
rename to v1/ansible/inventory/script.py
index 9675d70f690..b83cb9bcc7a 100644
--- a/v2/ansible/inventory/script.py
+++ b/v1/ansible/inventory/script.py
@@ -16,26 +16,22 @@
# along with Ansible. If not, see .
#############################################
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
import os
import subprocess
-import sys
-
-from ansible import constants as C
-from ansible.errors import *
+import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
+from ansible import utils
+from ansible import errors
+import sys
-class InventoryScript:
+class InventoryScript(object):
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
-
- self._loader = loader
+ def __init__(self, filename=C.DEFAULT_HOST_LIST):
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -45,11 +41,11 @@ class InventoryScript:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
- raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
+ raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
@@ -62,7 +58,7 @@ class InventoryScript:
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
- self.raw = self._loader.load(self.data)
+ self.raw = utils.parse_json(self.data)
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
@@ -72,7 +68,7 @@ class InventoryScript:
if 'failed' in self.raw:
sys.stderr.write(err + "\n")
- raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
+ raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
for (group_name, data) in self.raw.items():
@@ -96,12 +92,12 @@ class InventoryScript:
if not isinstance(data, dict):
data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars')):
+ elif not any(k in data for k in ('hosts','vars','children')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
- raise AnsibleError("You defined a group \"%s\" with bad "
+ raise errors.AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
@@ -112,7 +108,7 @@ class InventoryScript:
if 'vars' in data:
if not isinstance(data['vars'], dict):
- raise AnsibleError("You defined a group \"%s\" with bad "
+ raise errors.AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
@@ -147,12 +143,12 @@ class InventoryScript:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
- return json_dict_bytes_to_unicode(self._loader.load(out))
+ return json_dict_bytes_to_unicode(utils.parse_json(out))
except ValueError:
- raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
+ raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/runner/connection_plugins/__init__.py b/v1/ansible/inventory/vars_plugins/__init__.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/__init__.py
rename to v1/ansible/inventory/vars_plugins/__init__.py
diff --git a/v2/ansible/inventory/vars_plugins/noop.py b/v1/ansible/inventory/vars_plugins/noop.py
similarity index 94%
rename from v2/ansible/inventory/vars_plugins/noop.py
rename to v1/ansible/inventory/vars_plugins/noop.py
index 8f0c98cad56..5d4b4b6658c 100644
--- a/v2/ansible/inventory/vars_plugins/noop.py
+++ b/v1/ansible/inventory/vars_plugins/noop.py
@@ -15,8 +15,6 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
class VarsModule(object):
diff --git a/lib/ansible/module_common.py b/v1/ansible/module_common.py
similarity index 100%
rename from lib/ansible/module_common.py
rename to v1/ansible/module_common.py
diff --git a/v2/ansible/module_utils/__init__.py b/v1/ansible/module_utils/__init__.py
similarity index 100%
rename from v2/ansible/module_utils/__init__.py
rename to v1/ansible/module_utils/__init__.py
diff --git a/v2/ansible/module_utils/a10.py b/v1/ansible/module_utils/a10.py
similarity index 100%
rename from v2/ansible/module_utils/a10.py
rename to v1/ansible/module_utils/a10.py
diff --git a/v2/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py
similarity index 97%
rename from v2/ansible/module_utils/basic.py
rename to v1/ansible/module_utils/basic.py
index 8f9b03f882d..54a1a9cfff7 100644
--- a/v2/ansible/module_utils/basic.py
+++ b/v1/ansible/module_utils/basic.py
@@ -43,7 +43,7 @@ BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# can be inserted in any module source automatically by including
# #<> on a blank line by itself inside
# of an ansible module. The source of this common code lives
-# in ansible/executor/module_common.py
+# in lib/ansible/module_common.py
import locale
import os
@@ -65,7 +65,6 @@ import pwd
import platform
import errno
import tempfile
-from itertools import imap, repeat
try:
import json
@@ -235,7 +234,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d, encoding='utf-8'):
+def json_dict_unicode_to_bytes(d):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -243,17 +242,17 @@ def json_dict_unicode_to_bytes(d, encoding='utf-8'):
'''
if isinstance(d, unicode):
- return d.encode(encoding)
+ return d.encode('utf-8')
elif isinstance(d, dict):
- return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
+ return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
elif isinstance(d, list):
- return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
+ return list(map(json_dict_unicode_to_bytes, d))
elif isinstance(d, tuple):
- return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
+ return tuple(map(json_dict_unicode_to_bytes, d))
else:
return d
-def json_dict_bytes_to_unicode(d, encoding='utf-8'):
+def json_dict_bytes_to_unicode(d):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -261,13 +260,13 @@ def json_dict_bytes_to_unicode(d, encoding='utf-8'):
'''
if isinstance(d, str):
- return unicode(d, encoding)
+ return unicode(d, 'utf-8')
elif isinstance(d, dict):
- return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
+ return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
elif isinstance(d, list):
- return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
+ return list(map(json_dict_bytes_to_unicode, d))
elif isinstance(d, tuple):
- return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
+ return tuple(map(json_dict_bytes_to_unicode, d))
else:
return d
@@ -360,9 +359,9 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale
self._check_locale()
- self.params = self._load_params()
+ (self.params, self.args) = self._load_params()
- self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
+ self._legal_inputs = ['CHECKMODE', 'NO_LOG']
self.aliases = self._handle_aliases()
@@ -889,7 +888,7 @@ class AnsibleModule(object):
def _check_for_check_mode(self):
for (k,v) in self.params.iteritems():
- if k == '_ansible_check_mode':
+ if k == 'CHECKMODE':
if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode")
if self.supports_check_mode:
@@ -897,13 +896,13 @@ class AnsibleModule(object):
def _check_for_no_log(self):
for (k,v) in self.params.iteritems():
- if k == '_ansible_no_log':
+ if k == 'NO_LOG':
self.no_log = self.boolean(v)
def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems():
# these should be in legal inputs already
- #if k in ('_ansible_check_mode', '_ansible_no_log'):
+ #if k in ('CHECKMODE', 'NO_LOG'):
# continue
if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -1076,11 +1075,20 @@ class AnsibleModule(object):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- if params is None:
- params = dict()
- return params
-
+ args = MODULE_ARGS
+ items = shlex.split(args)
+ params = {}
+ for x in items:
+ try:
+ (k, v) = x.split("=",1)
+ except Exception, e:
+ self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
+ if k in params:
+ self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
+ params[k] = v
+ params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
+ params2.update(params)
+ return (params2, args)
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -1201,17 +1209,13 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1"):
+ for encoding in ("utf-8", "latin-1", "unicode_escape"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using old simplejson module does not support encoding keyword.
- except TypeError:
- try:
- new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
- except UnicodeDecodeError:
- continue
- return json.dumps(new_data)
- except UnicodeDecodeError:
+ # Old systems using simplejson module does not support encoding keyword.
+ except TypeError, e:
+ return json.dumps(data)
+ except UnicodeDecodeError, e:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
@@ -1448,7 +1452,7 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporary env path if a prefix is passed
+ # Set a temporart env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
diff --git a/v2/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py
similarity index 100%
rename from v2/ansible/module_utils/cloudstack.py
rename to v1/ansible/module_utils/cloudstack.py
diff --git a/v2/ansible/module_utils/database.py b/v1/ansible/module_utils/database.py
similarity index 100%
rename from v2/ansible/module_utils/database.py
rename to v1/ansible/module_utils/database.py
diff --git a/v2/ansible/module_utils/ec2.py b/v1/ansible/module_utils/ec2.py
similarity index 100%
rename from v2/ansible/module_utils/ec2.py
rename to v1/ansible/module_utils/ec2.py
diff --git a/v2/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py
similarity index 100%
rename from v2/ansible/module_utils/facts.py
rename to v1/ansible/module_utils/facts.py
diff --git a/v2/ansible/module_utils/gce.py b/v1/ansible/module_utils/gce.py
similarity index 100%
rename from v2/ansible/module_utils/gce.py
rename to v1/ansible/module_utils/gce.py
diff --git a/v2/ansible/module_utils/known_hosts.py b/v1/ansible/module_utils/known_hosts.py
similarity index 100%
rename from v2/ansible/module_utils/known_hosts.py
rename to v1/ansible/module_utils/known_hosts.py
diff --git a/v2/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py
similarity index 100%
rename from v2/ansible/module_utils/openstack.py
rename to v1/ansible/module_utils/openstack.py
diff --git a/v2/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1
similarity index 97%
rename from v2/ansible/module_utils/powershell.ps1
rename to v1/ansible/module_utils/powershell.ps1
index 57d2c1b101c..ee7d3ddeca4 100644
--- a/v2/ansible/module_utils/powershell.ps1
+++ b/v1/ansible/module_utils/powershell.ps1
@@ -142,14 +142,14 @@ Function ConvertTo-Bool
return
}
-# Helper function to calculate md5 of a file in a way which powershell 3
+# Helper function to calculate a hash of a file in a way which powershell 3
# and above can handle:
-Function Get-FileMd5($path)
+Function Get-FileChecksum($path)
{
$hash = ""
If (Test-Path -PathType Leaf $path)
{
- $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
diff --git a/v2/ansible/module_utils/rax.py b/v1/ansible/module_utils/rax.py
similarity index 100%
rename from v2/ansible/module_utils/rax.py
rename to v1/ansible/module_utils/rax.py
diff --git a/v2/ansible/module_utils/redhat.py b/v1/ansible/module_utils/redhat.py
similarity index 100%
rename from v2/ansible/module_utils/redhat.py
rename to v1/ansible/module_utils/redhat.py
diff --git a/v2/ansible/module_utils/splitter.py b/v1/ansible/module_utils/splitter.py
similarity index 100%
rename from v2/ansible/module_utils/splitter.py
rename to v1/ansible/module_utils/splitter.py
diff --git a/v2/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py
similarity index 100%
rename from v2/ansible/module_utils/urls.py
rename to v1/ansible/module_utils/urls.py
diff --git a/lib/ansible/module_utils/vmware.py b/v1/ansible/module_utils/vmware.py
similarity index 100%
rename from lib/ansible/module_utils/vmware.py
rename to v1/ansible/module_utils/vmware.py
diff --git a/lib/ansible/runner/filter_plugins/__init__.py b/v1/ansible/modules/__init__.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/__init__.py
rename to v1/ansible/modules/__init__.py
diff --git a/v1/ansible/playbook/__init__.py b/v1/ansible/playbook/__init__.py
new file mode 100644
index 00000000000..24ba2d3c6e0
--- /dev/null
+++ b/v1/ansible/playbook/__init__.py
@@ -0,0 +1,874 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import ansible.inventory
+import ansible.constants as C
+import ansible.runner
+from ansible.utils.template import template
+from ansible import utils
+from ansible import errors
+from ansible.module_utils.splitter import split_args, unquote
+import ansible.callbacks
+import ansible.cache
+import os
+import shlex
+import collections
+from play import Play
+import StringIO
+import pipes
+
+# the setup cache stores all variables about a host
+# gathered during the setup step, while the vars cache
+# holds all other variables about a host
+SETUP_CACHE = ansible.cache.FactCache()
+VARS_CACHE = collections.defaultdict(dict)
+RESERVED_TAGS = ['all','tagged','untagged','always']
+
+
+class PlayBook(object):
+ '''
+ runs an ansible playbook, given as a datastructure or YAML filename.
+ A playbook is a deployment, config management, or automation based
+ set of commands to run in series.
+
+ multiple plays/tasks do not execute simultaneously, but tasks in each
+ pattern do execute in parallel (according to the number of forks
+ requested) among the hosts they address
+ '''
+
+ # *****************************************************
+
+ def __init__(self,
+ playbook = None,
+ host_list = C.DEFAULT_HOST_LIST,
+ module_path = None,
+ forks = C.DEFAULT_FORKS,
+ timeout = C.DEFAULT_TIMEOUT,
+ remote_user = C.DEFAULT_REMOTE_USER,
+ remote_pass = C.DEFAULT_REMOTE_PASS,
+ remote_port = None,
+ transport = C.DEFAULT_TRANSPORT,
+ private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
+ callbacks = None,
+ runner_callbacks = None,
+ stats = None,
+ extra_vars = None,
+ only_tags = None,
+ skip_tags = None,
+ subset = C.DEFAULT_SUBSET,
+ inventory = None,
+ check = False,
+ diff = False,
+ any_errors_fatal = False,
+ vault_password = False,
+ force_handlers = False,
+ # privilege escalation
+ become = C.DEFAULT_BECOME,
+ become_method = C.DEFAULT_BECOME_METHOD,
+ become_user = C.DEFAULT_BECOME_USER,
+ become_pass = None,
+ ):
+
+ """
+ playbook: path to a playbook file
+ host_list: path to a file like /etc/ansible/hosts
+ module_path: path to ansible modules, like /usr/share/ansible/
+ forks: desired level of parallelism
+ timeout: connection timeout
+ remote_user: run as this user if not specified in a particular play
+ remote_pass: use this remote password (for all plays) vs using SSH keys
+ remote_port: default remote port to use if not specified with the host or play
+ transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
+ callbacks output callbacks for the playbook
+ runner_callbacks: more callbacks, this time for the runner API
+        stats:            holds aggregate data about events occurring to each host
+ inventory: can be specified instead of host_list to use a pre-existing inventory object
+ check: don't change anything, just try to detect some potential changes
+ any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
+ force_handlers: continue to notify and run handlers even if a task fails
+ """
+
+ self.SETUP_CACHE = SETUP_CACHE
+ self.VARS_CACHE = VARS_CACHE
+
+ arguments = []
+ if playbook is None:
+ arguments.append('playbook')
+ if callbacks is None:
+ arguments.append('callbacks')
+ if runner_callbacks is None:
+ arguments.append('runner_callbacks')
+ if stats is None:
+ arguments.append('stats')
+ if arguments:
+ raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
+
+ if extra_vars is None:
+ extra_vars = {}
+ if only_tags is None:
+ only_tags = [ 'all' ]
+ if skip_tags is None:
+ skip_tags = []
+
+ self.check = check
+ self.diff = diff
+ self.module_path = module_path
+ self.forks = forks
+ self.timeout = timeout
+ self.remote_user = remote_user
+ self.remote_pass = remote_pass
+ self.remote_port = remote_port
+ self.transport = transport
+ self.callbacks = callbacks
+ self.runner_callbacks = runner_callbacks
+ self.stats = stats
+ self.extra_vars = extra_vars
+ self.global_vars = {}
+ self.private_key_file = private_key_file
+ self.only_tags = only_tags
+ self.skip_tags = skip_tags
+ self.any_errors_fatal = any_errors_fatal
+ self.vault_password = vault_password
+ self.force_handlers = force_handlers
+
+ self.become = become
+ self.become_method = become_method
+ self.become_user = become_user
+ self.become_pass = become_pass
+
+ self.callbacks.playbook = self
+ self.runner_callbacks.playbook = self
+
+ if inventory is None:
+ self.inventory = ansible.inventory.Inventory(host_list)
+ self.inventory.subset(subset)
+ else:
+ self.inventory = inventory
+
+ if self.module_path is not None:
+ utils.plugins.module_finder.add_directory(self.module_path)
+
+ self.basedir = os.path.dirname(playbook) or '.'
+ utils.plugins.push_basedir(self.basedir)
+
+ # let inventory know the playbook basedir so it can load more vars
+ self.inventory.set_playbook_basedir(self.basedir)
+
+ vars = extra_vars.copy()
+ vars['playbook_dir'] = os.path.abspath(self.basedir)
+ if self.inventory.basedir() is not None:
+ vars['inventory_dir'] = self.inventory.basedir()
+
+ if self.inventory.src() is not None:
+ vars['inventory_file'] = self.inventory.src()
+
+ self.filename = playbook
+ (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
+ ansible.callbacks.load_callback_plugins()
+ ansible.callbacks.set_playbook(self.callbacks, self)
+
+ self._ansible_version = utils.version_info(gitinfo=True)
+
+ # *****************************************************
+
+ def _get_playbook_vars(self, play_ds, existing_vars):
+ '''
+ Gets the vars specified with the play and blends them
+ with any existing vars that have already been read in
+ '''
+ new_vars = existing_vars.copy()
+ if 'vars' in play_ds:
+ if isinstance(play_ds['vars'], dict):
+ new_vars.update(play_ds['vars'])
+ elif isinstance(play_ds['vars'], list):
+ for v in play_ds['vars']:
+ new_vars.update(v)
+ return new_vars
+
+ # *****************************************************
+
+ def _get_include_info(self, play_ds, basedir, existing_vars={}):
+ '''
+ Gets any key=value pairs specified with the included file
+ name and returns the merged vars along with the path
+ '''
+ new_vars = existing_vars.copy()
+ tokens = split_args(play_ds.get('include', ''))
+ for t in tokens[1:]:
+ try:
+ (k,v) = unquote(t).split("=", 1)
+ new_vars[k] = template(basedir, v, new_vars)
+ except ValueError, e:
+ raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
+
+ return (new_vars, unquote(tokens[0]))
+
+ # *****************************************************
+
+ def _get_playbook_vars_files(self, play_ds, existing_vars_files):
+ new_vars_files = list(existing_vars_files)
+ if 'vars_files' in play_ds:
+ new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
+ return new_vars_files
+
+ # *****************************************************
+
+ def _extend_play_vars(self, play, vars={}):
+ '''
+ Extends the given play's variables with the additional specified vars.
+ '''
+
+ if 'vars' not in play or not play['vars']:
+ # someone left out or put an empty "vars:" entry in their playbook
+ return vars.copy()
+
+ play_vars = None
+ if isinstance(play['vars'], dict):
+ play_vars = play['vars'].copy()
+ play_vars.update(vars)
+ elif isinstance(play['vars'], list):
+ # nobody should really do this, but handle vars: a=1 b=2
+ play_vars = play['vars'][:]
+ play_vars.extend([{k:v} for k,v in vars.iteritems()])
+
+ return play_vars
+
+ # *****************************************************
+
+ def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
+ '''
+ run top level error checking on playbooks and allow them to include other playbooks.
+ '''
+
+ playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
+ accumulated_plays = []
+ play_basedirs = []
+
+ if type(playbook_data) != list:
+ raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
+
+ basedir = os.path.dirname(path) or '.'
+ utils.plugins.push_basedir(basedir)
+ for play in playbook_data:
+ if type(play) != dict:
+ raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
+
+ if 'include' in play:
+ # a playbook (list of plays) decided to include some other list of plays
+ # from another file. The result is a flat list of plays in the end.
+
+ play_vars = self._get_playbook_vars(play, vars)
+ play_vars_files = self._get_playbook_vars_files(play, vars_files)
+ inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
+ play_vars.update(inc_vars)
+
+ included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
+ (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
+ for p in plays:
+ # support for parameterized play includes works by passing
+ # those variables along to the subservient play
+ p['vars'] = self._extend_play_vars(p, play_vars)
+ # now add in the vars_files
+ p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
+
+ accumulated_plays.extend(plays)
+ play_basedirs.extend(basedirs)
+
+ else:
+
+ # this is a normal (non-included play)
+ accumulated_plays.append(play)
+ play_basedirs.append(basedir)
+
+ return (accumulated_plays, play_basedirs)
+
+ # *****************************************************
+
+ def run(self):
+ ''' run all patterns in the playbook '''
+ plays = []
+ matched_tags_all = set()
+ unmatched_tags_all = set()
+
+ # loop through all patterns and run them
+ self.callbacks.on_start()
+ for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
+ play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
+ assert play is not None
+
+ matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
+
+ matched_tags_all = matched_tags_all | matched_tags
+ unmatched_tags_all = unmatched_tags_all | unmatched_tags
+
+ # Remove tasks we wish to skip
+ matched_tags = matched_tags - set(self.skip_tags)
+
+ # if we have matched_tags, the play must be run.
+ # if the play contains no tasks, assume we just want to gather facts
+ # in this case there are actually 3 meta tasks (handler flushes) not 0
+ # tasks, so that's why there's a check against 3
+ if (len(matched_tags) > 0 or len(play.tasks()) == 3):
+ plays.append(play)
+
+ # if the playbook is invoked with --tags or --skip-tags that don't
+ # exist at all in the playbooks then we need to raise an error so that
+ # the user can correct the arguments.
+ unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
+ (matched_tags_all | unmatched_tags_all))
+
+ for t in RESERVED_TAGS:
+ unknown_tags.discard(t)
+
+ if len(unknown_tags) > 0:
+ for t in RESERVED_TAGS:
+ unmatched_tags_all.discard(t)
+ msg = 'tag(s) not found in playbook: %s. possible values: %s'
+ unknown = ','.join(sorted(unknown_tags))
+ unmatched = ','.join(sorted(unmatched_tags_all))
+ raise errors.AnsibleError(msg % (unknown, unmatched))
+
+ for play in plays:
+ ansible.callbacks.set_play(self.callbacks, play)
+ ansible.callbacks.set_play(self.runner_callbacks, play)
+ if not self._run_play(play):
+ break
+
+ ansible.callbacks.set_play(self.callbacks, None)
+ ansible.callbacks.set_play(self.runner_callbacks, None)
+
+ # summarize the results
+ results = {}
+ for host in self.stats.processed.keys():
+ results[host] = self.stats.summarize(host)
+ return results
+
+ # *****************************************************
+
+ def _async_poll(self, poller, async_seconds, async_poll_interval):
+ ''' launch an async job, if poll_interval is set, wait for completion '''
+
+ results = poller.wait(async_seconds, async_poll_interval)
+
+ # mark any hosts that are still listed as started as failed
+ # since these likely got killed by async_wrapper
+ for host in poller.hosts_to_poll:
+ reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
+ self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
+ results['contacted'][host] = reason
+
+ return results
+
+ # *****************************************************
+
+ def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
+ ''' returns a list of hosts that haven't failed and aren't dark '''
+
+ return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
+
+ # *****************************************************
+
+ def _run_task_internal(self, task, include_failed=False):
+ ''' run a particular module step in a playbook '''
+
+ hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
+ self.inventory.restrict_to(hosts)
+
+ runner = ansible.runner.Runner(
+ pattern=task.play.hosts,
+ inventory=self.inventory,
+ module_name=task.module_name,
+ module_args=task.module_args,
+ forks=self.forks,
+ remote_pass=self.remote_pass,
+ module_path=self.module_path,
+ timeout=self.timeout,
+ remote_user=task.remote_user,
+ remote_port=task.play.remote_port,
+ module_vars=task.module_vars,
+ play_vars=task.play_vars,
+ play_file_vars=task.play_file_vars,
+ role_vars=task.role_vars,
+ role_params=task.role_params,
+ default_vars=task.default_vars,
+ extra_vars=self.extra_vars,
+ private_key_file=self.private_key_file,
+ setup_cache=self.SETUP_CACHE,
+ vars_cache=self.VARS_CACHE,
+ basedir=task.play.basedir,
+ conditional=task.when,
+ callbacks=self.runner_callbacks,
+ transport=task.transport,
+ is_playbook=True,
+ check=self.check,
+ diff=self.diff,
+ environment=task.environment,
+ complex_args=task.args,
+ accelerate=task.play.accelerate,
+ accelerate_port=task.play.accelerate_port,
+ accelerate_ipv6=task.play.accelerate_ipv6,
+ error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
+ vault_pass = self.vault_password,
+ run_hosts=hosts,
+ no_log=task.no_log,
+ run_once=task.run_once,
+ become=task.become,
+ become_method=task.become_method,
+ become_user=task.become_user,
+ become_pass=task.become_pass,
+ )
+
+ runner.module_vars.update({'play_hosts': hosts})
+ runner.module_vars.update({'ansible_version': self._ansible_version})
+
+ if task.async_seconds == 0:
+ results = runner.run()
+ else:
+ results, poller = runner.run_async(task.async_seconds)
+ self.stats.compute(results)
+ if task.async_poll_interval > 0:
+ # if not polling, playbook requested fire and forget, so don't poll
+ results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
+ else:
+ for (host, res) in results.get('contacted', {}).iteritems():
+ self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
+
+ contacted = results.get('contacted',{})
+ dark = results.get('dark', {})
+
+ self.inventory.lift_restriction()
+
+ if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
+ return None
+
+ return results
+
+ # *****************************************************
+
+ def _run_task(self, play, task, is_handler):
+ ''' run a single task in the playbook and recursively run any subtasks. '''
+
+ ansible.callbacks.set_task(self.callbacks, task)
+ ansible.callbacks.set_task(self.runner_callbacks, task)
+
+ if task.role_name:
+ name = '%s | %s' % (task.role_name, task.name)
+ else:
+ name = task.name
+
+ try:
+ # v1 HACK: we don't have enough information to template many names
+ # at this point. Rather than making this work for all cases in
+ # v1, just make this degrade gracefully. Will fix in v2
+ name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
+ except:
+ pass
+
+ self.callbacks.on_task_start(name, is_handler)
+ if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+ return True
+
+ # template ignore_errors
+ # TODO: Is this needed here? cond is templated again in
+ # check_conditional after some more manipulations.
+ # TODO: we don't have enough information here to template cond either
+ # (see note on templating name above)
+ cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
+ task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
+
+ # load up an appropriate ansible runner to run the task in parallel
+ include_failed = is_handler and play.force_handlers
+ results = self._run_task_internal(task, include_failed=include_failed)
+
+ # if no hosts are matched, carry on
+ hosts_remaining = True
+ if results is None:
+ hosts_remaining = False
+ results = {}
+
+ contacted = results.get('contacted', {})
+ self.stats.compute(results, ignore_errors=task.ignore_errors)
+
+ def _register_play_vars(host, result):
+ # when 'register' is used, persist the result in the vars cache
+ # rather than the setup cache - vars should be transient between
+ # playbook executions
+ if 'stdout' in result and 'stdout_lines' not in result:
+ result['stdout_lines'] = result['stdout'].splitlines()
+ utils.update_hash(self.VARS_CACHE, host, {task.register: result})
+
+ def _save_play_facts(host, facts):
+ # saves play facts in SETUP_CACHE, unless the module executed was
+ # set_fact, in which case we add them to the VARS_CACHE
+ if task.module_name in ('set_fact', 'include_vars'):
+ utils.update_hash(self.VARS_CACHE, host, facts)
+ else:
+ utils.update_hash(self.SETUP_CACHE, host, facts)
+
+ # add facts to the global setup cache
+ for host, result in contacted.iteritems():
+ if 'results' in result:
+ # task ran with_ lookup plugin, so facts are encapsulated in
+ # multiple list items in the results key
+ for res in result['results']:
+ if type(res) == dict:
+ facts = res.get('ansible_facts', {})
+ _save_play_facts(host, facts)
+ else:
+ # when facts are returned, persist them in the setup cache
+ facts = result.get('ansible_facts', {})
+ _save_play_facts(host, facts)
+
+ # if requested, save the result into the registered variable name
+ if task.register:
+ _register_play_vars(host, result)
+
+ # also have to register some failed, but ignored, tasks
+ if task.ignore_errors and task.register:
+ failed = results.get('failed', {})
+ for host, result in failed.iteritems():
+ _register_play_vars(host, result)
+
+ # flag which notify handlers need to be run
+ if len(task.notify) > 0:
+ for host, results in results.get('contacted',{}).iteritems():
+ if results.get('changed', False):
+ for handler_name in task.notify:
+ self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
+
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+ return hosts_remaining
+
+ # *****************************************************
+
+ def _flag_handler(self, play, handler_name, host):
+ '''
+ if a task has any notify elements, flag handlers for run
+ at end of execution cycle for hosts that have indicated
+ changes have been made
+ '''
+
+ found = False
+ for x in play.handlers():
+ if handler_name == template(play.basedir, x.name, x.module_vars):
+ found = True
+ self.callbacks.on_notify(host, x.name)
+ x.notified_by.append(host)
+ if not found:
+ raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
+
+ # *****************************************************
+
+ def _do_setup_step(self, play):
+ ''' get facts from the remote system '''
+
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+
+ if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
+ host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
+ if len(host_list) == 0:
+ return {}
+ elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
+ return {}
+
+ self.callbacks.on_setup()
+ self.inventory.restrict_to(host_list)
+
+ ansible.callbacks.set_task(self.callbacks, None)
+ ansible.callbacks.set_task(self.runner_callbacks, None)
+
+ # push any variables down to the system
+ setup_results = ansible.runner.Runner(
+ basedir=self.basedir,
+ pattern=play.hosts,
+ module_name='setup',
+ module_args={},
+ inventory=self.inventory,
+ forks=self.forks,
+ module_path=self.module_path,
+ timeout=self.timeout,
+ remote_user=play.remote_user,
+ remote_pass=self.remote_pass,
+ remote_port=play.remote_port,
+ private_key_file=self.private_key_file,
+ setup_cache=self.SETUP_CACHE,
+ vars_cache=self.VARS_CACHE,
+ callbacks=self.runner_callbacks,
+ become=play.become,
+ become_method=play.become_method,
+ become_user=play.become_user,
+ become_pass=self.become_pass,
+ vault_pass=self.vault_password,
+ transport=play.transport,
+ is_playbook=True,
+ module_vars=play.vars,
+ play_vars=play.vars,
+ play_file_vars=play.vars_file_vars,
+ role_vars=play.role_vars,
+ default_vars=play.default_vars,
+ check=self.check,
+ diff=self.diff,
+ accelerate=play.accelerate,
+ accelerate_port=play.accelerate_port,
+ ).run()
+ self.stats.compute(setup_results, setup=True)
+
+ self.inventory.lift_restriction()
+
+ # now for each result, load into the setup cache so we can
+ # let runner template out future commands
+ setup_ok = setup_results.get('contacted', {})
+ for (host, result) in setup_ok.iteritems():
+ utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
+ utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
+ return setup_results
+
+ # *****************************************************
+
+
+ def generate_retry_inventory(self, replay_hosts):
+ '''
+ called by /usr/bin/ansible when a playbook run fails. It generates an inventory
+ that allows re-running on ONLY the failed hosts. This may duplicate some
+ variable information in group_vars/host_vars but that is ok, and expected.
+ '''
+
+ buf = StringIO.StringIO()
+ for x in replay_hosts:
+ buf.write("%s\n" % x)
+ basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
+ filename = "%s.retry" % os.path.basename(self.filename)
+ filename = filename.replace(".yml","")
+ filename = os.path.join(basedir, filename)
+
+ try:
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+
+ fd = open(filename, 'w')
+ fd.write(buf.getvalue())
+ fd.close()
+ except:
+ ansible.callbacks.display(
+ "\nERROR: could not create retry file. Check the value of \n"
+ + "the configuration variable 'retry_files_save_path' or set \n"
+ + "'retry_files_enabled' to False to avoid this message.\n",
+ color='red'
+ )
+ return None
+
+ return filename
+
+ # *****************************************************
+ def tasks_to_run_in_play(self, play):
+
+ tasks = []
+
+ for task in play.tasks():
+ # only run the task if the requested tags match or has 'always' tag
+ u = set(['untagged'])
+ task_set = set(task.tags)
+
+ if 'always' in task.tags:
+ should_run = True
+ else:
+ if 'all' in self.only_tags:
+ should_run = True
+ else:
+ should_run = False
+ if 'tagged' in self.only_tags:
+ if task_set != u:
+ should_run = True
+ elif 'untagged' in self.only_tags:
+ if task_set == u:
+ should_run = True
+ else:
+ if task_set.intersection(self.only_tags):
+ should_run = True
+
+ # Check for tags that we need to skip
+ if 'all' in self.skip_tags:
+ should_run = False
+ else:
+ if 'tagged' in self.skip_tags:
+ if task_set != u:
+ should_run = False
+ elif 'untagged' in self.skip_tags:
+ if task_set == u:
+ should_run = False
+ else:
+ if should_run:
+ if task_set.intersection(self.skip_tags):
+ should_run = False
+
+ if should_run:
+ tasks.append(task)
+
+ return tasks
+
+ # *****************************************************
+ def _run_play(self, play):
+ ''' run a list of tasks for a given pattern, in order '''
+
+ self.callbacks.on_play_start(play.name)
+ # Get the hosts for this play
+ play._play_hosts = self.inventory.list_hosts(play.hosts)
+ # if no hosts matches this play, drop out
+ if not play._play_hosts:
+ self.callbacks.on_no_hosts_matched()
+ return True
+
+ # get facts from system
+ self._do_setup_step(play)
+
+        # now with that data, handle conditional variable file imports!
+ all_hosts = self._trim_unavailable_hosts(play._play_hosts)
+ play.update_vars_files(all_hosts, vault_password=self.vault_password)
+ hosts_count = len(all_hosts)
+
+ if play.serial.endswith("%"):
+
+ # This is a percentage, so calculate it based on the
+ # number of hosts
+ serial_pct = int(play.serial.replace("%",""))
+ serial = int((serial_pct/100.0) * len(all_hosts))
+
+ # Ensure that no matter how small the percentage, serial
+ # can never fall below 1, so that things actually happen
+ serial = max(serial, 1)
+ else:
+ serial = int(play.serial)
+
+ serialized_batch = []
+ if serial <= 0:
+ serialized_batch = [all_hosts]
+ else:
+ # do N forks all the way through before moving to next
+ while len(all_hosts) > 0:
+ play_hosts = []
+ for x in range(serial):
+ if len(all_hosts) > 0:
+ play_hosts.append(all_hosts.pop(0))
+ serialized_batch.append(play_hosts)
+
+ task_errors = False
+ for on_hosts in serialized_batch:
+
+ # restrict the play to just the hosts we have in our on_hosts block that are
+ # available.
+ play._play_hosts = self._trim_unavailable_hosts(on_hosts)
+ self.inventory.also_restrict_to(on_hosts)
+
+ for task in self.tasks_to_run_in_play(play):
+
+ if task.meta is not None:
+ # meta tasks can force handlers to run mid-play
+ if task.meta == 'flush_handlers':
+ self.run_handlers(play)
+
+ # skip calling the handler till the play is finished
+ continue
+
+ if not self._run_task(play, task, False):
+ # whether no hosts matched is fatal or not depends if it was on the initial step.
+ # if we got exactly no hosts on the first step (setup!) then the host group
+ # just didn't match anything and that's ok
+ return False
+
+ # Get a new list of what hosts are left as available, the ones that
+ # did not go fail/dark during the task
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+
+ # Set max_fail_pct to 0, So if any hosts fails, bail out
+ if task.any_errors_fatal and len(host_list) < hosts_count:
+ play.max_fail_pct = 0
+
+ # If threshold for max nodes failed is exceeded, bail out.
+ if play.serial > 0:
+ # if serial is set, we need to shorten the size of host_count
+ play_count = len(play._play_hosts)
+ if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
+ host_list = None
+ else:
+ if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
+ host_list = None
+
+ # if no hosts remain, drop out
+ if not host_list:
+ if play.force_handlers:
+ task_errors = True
+ break
+ else:
+ self.callbacks.on_no_hosts_remaining()
+ return False
+
+ # lift restrictions after each play finishes
+ self.inventory.lift_also_restriction()
+
+ if task_errors and not play.force_handlers:
+ # if there were failed tasks and handler execution
+ # is not forced, quit the play with an error
+ return False
+ else:
+ # no errors, go ahead and execute all handlers
+ if not self.run_handlers(play):
+ return False
+
+ return True
+
+
+ def run_handlers(self, play):
+ on_hosts = play._play_hosts
+ hosts_count = len(on_hosts)
+ for task in play.tasks():
+ if task.meta is not None:
+
+ fired_names = {}
+ for handler in play.handlers():
+ if len(handler.notified_by) > 0:
+ self.inventory.restrict_to(handler.notified_by)
+
+ # Resolve the variables first
+ handler_name = template(play.basedir, handler.name, handler.module_vars)
+ if handler_name not in fired_names:
+ self._run_task(play, handler, True)
+ # prevent duplicate handler includes from running more than once
+ fired_names[handler_name] = 1
+
+ host_list = self._trim_unavailable_hosts(play._play_hosts)
+ if handler.any_errors_fatal and len(host_list) < hosts_count:
+ play.max_fail_pct = 0
+ if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
+ host_list = None
+ if not host_list and not play.force_handlers:
+ self.callbacks.on_no_hosts_remaining()
+ return False
+
+ self.inventory.lift_restriction()
+ new_list = handler.notified_by[:]
+ for host in handler.notified_by:
+ if host in on_hosts:
+ while host in new_list:
+ new_list.remove(host)
+ handler.notified_by = new_list
+
+ continue
+
+ return True
diff --git a/v1/ansible/playbook/play.py b/v1/ansible/playbook/play.py
new file mode 100644
index 00000000000..6ee85e0bf48
--- /dev/null
+++ b/v1/ansible/playbook/play.py
@@ -0,0 +1,949 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+from ansible.utils.template import template
+from ansible import utils
+from ansible import errors
+from ansible.playbook.task import Task
+from ansible.module_utils.splitter import split_args, unquote
+import ansible.constants as C
+import pipes
+import shlex
+import os
+import sys
+import uuid
+
+
+class Play(object):
+
+ _pb_common = [
+ 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
+ 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
+ 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
+ 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
+ 'vault_password',
+ ]
+
+ __slots__ = _pb_common + [
+ '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
+ 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
+ 'role_vars', 'transport', 'vars_file_vars',
+ ]
+
+ # to catch typos and so forth -- these are userland names
+ # and don't line up 1:1 with how they are stored
+ VALID_KEYS = frozenset(_pb_common + [
+ 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
+ 'pre_tasks', 'role_names', 'tasks', 'user',
+ ])
+
+ # *************************************************
+
+ def __init__(self, playbook, ds, basedir, vault_password=None):
+ ''' constructor loads from a play datastructure '''
+
+ for x in ds.keys():
+ if not x in Play.VALID_KEYS:
+ raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
+
+ # allow all playbook keys to be set by --extra-vars
+ self.vars = ds.get('vars', {})
+ self.vars_prompt = ds.get('vars_prompt', {})
+ self.playbook = playbook
+ self.vars = self._get_vars()
+ self.vars_file_vars = dict() # these are vars read in from vars_files:
+ self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
+ self.basedir = basedir
+ self.roles = ds.get('roles', None)
+ self.tags = ds.get('tags', None)
+ self.vault_password = vault_password
+ self.environment = ds.get('environment', {})
+
+ if self.tags is None:
+ self.tags = []
+ elif type(self.tags) in [ str, unicode ]:
+ self.tags = self.tags.split(",")
+ elif type(self.tags) != list:
+ self.tags = []
+
+ # make sure we have some special internal variables set, which
+ # we use later when loading tasks and handlers
+ load_vars = dict()
+ load_vars['playbook_dir'] = os.path.abspath(self.basedir)
+ if self.playbook.inventory.basedir() is not None:
+ load_vars['inventory_dir'] = self.playbook.inventory.basedir()
+ if self.playbook.inventory.src() is not None:
+ load_vars['inventory_file'] = self.playbook.inventory.src()
+
+ # We first load the vars files from the datastructure
+ # so we have the default variables to pass into the roles
+ self.vars_files = ds.get('vars_files', [])
+ if not isinstance(self.vars_files, list):
+ raise errors.AnsibleError('vars_files must be a list')
+ processed_vars_files = self._update_vars_files_for_host(None)
+
+ # now we load the roles into the datastructure
+ self.included_roles = []
+ ds = self._load_roles(self.roles, ds)
+
+ # and finally re-process the vars files as they may have been updated
+ # by the included roles, but exclude any which have been processed
+ self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
+ if not isinstance(self.vars_files, list):
+ raise errors.AnsibleError('vars_files must be a list')
+
+ self._update_vars_files_for_host(None)
+
+        # template everything to be efficient, but do not prematurely template
+ # tasks/handlers as they may have inventory scope overrides. We also
+ # create a set of temporary variables for templating, so we don't
+ # trample on the existing vars structures
+ _tasks = ds.pop('tasks', [])
+ _handlers = ds.pop('handlers', [])
+
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+
+ try:
+ ds = template(basedir, ds, temp_vars)
+ except errors.AnsibleError, e:
+ utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
+
+ ds['tasks'] = _tasks
+ ds['handlers'] = _handlers
+
+ self._ds = ds
+
+ hosts = ds.get('hosts')
+ if hosts is None:
+ raise errors.AnsibleError('hosts declaration is required')
+ elif isinstance(hosts, list):
+ try:
+ hosts = ';'.join(hosts)
+ except TypeError,e:
+ raise errors.AnsibleError('improper host declaration: %s' % str(e))
+
+ self.serial = str(ds.get('serial', 0))
+ self.hosts = hosts
+ self.name = ds.get('name', self.hosts)
+ self._tasks = ds.get('tasks', [])
+ self._handlers = ds.get('handlers', [])
+ self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
+ self.remote_port = ds.get('port', self.playbook.remote_port)
+ self.transport = ds.get('connection', self.playbook.transport)
+ self.remote_port = self.remote_port
+ self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
+ self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
+ self.accelerate_port = ds.get('accelerate_port', None)
+ self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
+ self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
+ self.no_log = utils.boolean(ds.get('no_log', 'false'))
+ self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
+
+ # Fail out if user specifies conflicting privilege escalations
+ if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
+            raise errors.AnsibleError('become params ("become", "become_user") and sudo params ("sudo", "sudo_user") cannot be used together')
+ if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
+            raise errors.AnsibleError('become params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
+ if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
+ raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+
+ # become settings are inherited and updated normally
+ self.become = ds.get('become', self.playbook.become)
+ self.become_method = ds.get('become_method', self.playbook.become_method)
+ self.become_user = ds.get('become_user', self.playbook.become_user)
+
+ # Make sure current play settings are reflected in become fields
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ self.become_method='sudo'
+ if 'sudo_user' in ds:
+ self.become_user=ds['sudo_user']
+ elif 'su' in ds:
+ self.become=True
+ self.become=ds['su']
+ self.become_method='su'
+ if 'su_user' in ds:
+ self.become_user=ds['su_user']
+
+ # gather_facts is not a simple boolean, as None means that a 'smart'
+ # fact gathering mode will be used, so we need to be careful here as
+ # calling utils.boolean(None) returns False
+ self.gather_facts = ds.get('gather_facts', None)
+ if self.gather_facts is not None:
+ self.gather_facts = utils.boolean(self.gather_facts)
+
+ load_vars['role_names'] = ds.get('role_names', [])
+
+ self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
+ self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
+
+ # apply any missing tags to role tasks
+ self._late_merge_role_tags()
+
+ # place holder for the discovered hosts to be used in this play
+ self._play_hosts = None
+
+ # *************************************************
+
+ def _get_role_path(self, role):
+ """
+ Returns the path on disk to the directory containing
+ the role directories like tasks, templates, etc. Also
+ returns any variables that were included with the role
+ """
+ orig_path = template(self.basedir,role,self.vars)
+
+ role_vars = {}
+ if type(orig_path) == dict:
+ # what, not a path?
+ role_name = orig_path.get('role', None)
+ if role_name is None:
+ raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
+ role_vars = orig_path
+ else:
+ role_name = utils.role_spec_parse(orig_path)["name"]
+
+ role_path = None
+
+ possible_paths = [
+ utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
+ utils.path_dwim(self.basedir, role_name)
+ ]
+
+ if C.DEFAULT_ROLES_PATH:
+ search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
+ for loc in search_locations:
+ loc = os.path.expanduser(loc)
+ possible_paths.append(utils.path_dwim(loc, role_name))
+
+ for path_option in possible_paths:
+ if os.path.isdir(path_option):
+ role_path = path_option
+ break
+
+ if role_path is None:
+ raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
+
+ return (role_path, role_vars)
+
+ def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
+ # this number is arbitrary, but it seems sane
+ if level > 20:
+ raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
+ for role in roles:
+ role_path,role_vars = self._get_role_path(role)
+
+ # save just the role params for this role, which exclude the special
+ # keywords 'role', 'tags', and 'when'.
+ role_params = role_vars.copy()
+ for item in ('role', 'tags', 'when'):
+ if item in role_params:
+ del role_params[item]
+
+ role_vars = utils.combine_vars(passed_vars, role_vars)
+
+ vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
+ vars_data = {}
+ if os.path.isfile(vars):
+ vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
+ if vars_data:
+ if not isinstance(vars_data, dict):
+ raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
+ role_vars = utils.combine_vars(vars_data, role_vars)
+
+ defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
+ defaults_data = {}
+ if os.path.isfile(defaults):
+ defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
+
+ # the meta directory contains the yaml that should
+ # hold the list of dependencies (if any)
+ meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
+ if os.path.isfile(meta):
+ data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
+ if data:
+ dependencies = data.get('dependencies',[])
+ if dependencies is None:
+ dependencies = []
+ for dep in dependencies:
+ allow_dupes = False
+ (dep_path,dep_vars) = self._get_role_path(dep)
+
+ # save the dep params, just as we did above
+ dep_params = dep_vars.copy()
+ for item in ('role', 'tags', 'when'):
+ if item in dep_params:
+ del dep_params[item]
+
+ meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
+ if os.path.isfile(meta):
+ meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
+ if meta_data:
+ allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
+
+ # if any tags were specified as role/dep variables, merge
+ # them into the current dep_vars so they're passed on to any
+ # further dependencies too, and so we only have one place
+ # (dep_vars) to look for tags going forward
+ def __merge_tags(var_obj):
+ old_tags = dep_vars.get('tags', [])
+ if isinstance(old_tags, basestring):
+ old_tags = [old_tags, ]
+ if isinstance(var_obj, dict):
+ new_tags = var_obj.get('tags', [])
+ if isinstance(new_tags, basestring):
+ new_tags = [new_tags, ]
+ else:
+ new_tags = []
+ return list(set(old_tags).union(set(new_tags)))
+
+ dep_vars['tags'] = __merge_tags(role_vars)
+ dep_vars['tags'] = __merge_tags(passed_vars)
+
+ # if tags are set from this role, merge them
+ # into the tags list for the dependent role
+ if "tags" in passed_vars:
+ for included_role_dep in dep_stack:
+ included_dep_name = included_role_dep[0]
+ included_dep_vars = included_role_dep[2]
+ if included_dep_name == dep:
+ if "tags" in included_dep_vars:
+ included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
+ else:
+ included_dep_vars["tags"] = passed_vars["tags"][:]
+
+ dep_vars = utils.combine_vars(passed_vars, dep_vars)
+ dep_vars = utils.combine_vars(role_vars, dep_vars)
+
+ vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
+ vars_data = {}
+ if os.path.isfile(vars):
+ vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
+ if vars_data:
+ dep_vars = utils.combine_vars(dep_vars, vars_data)
+ pass
+
+ defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
+ dep_defaults_data = {}
+ if os.path.isfile(defaults):
+ dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
+ if 'role' in dep_vars:
+ del dep_vars['role']
+
+ if not allow_dupes:
+ if dep in self.included_roles:
+ # skip back to the top, since we don't want to
+ # do anything else with this role
+ continue
+ else:
+ self.included_roles.append(dep)
+
+ def _merge_conditional(cur_conditionals, new_conditionals):
+ if isinstance(new_conditionals, (basestring, bool)):
+ cur_conditionals.append(new_conditionals)
+ elif isinstance(new_conditionals, list):
+ cur_conditionals.extend(new_conditionals)
+
+ # pass along conditionals from roles to dep roles
+ passed_when = passed_vars.get('when')
+ role_when = role_vars.get('when')
+ dep_when = dep_vars.get('when')
+
+ tmpcond = []
+ _merge_conditional(tmpcond, passed_when)
+ _merge_conditional(tmpcond, role_when)
+ _merge_conditional(tmpcond, dep_when)
+
+ if len(tmpcond) > 0:
+ dep_vars['when'] = tmpcond
+
+ self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
+ dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
+
+ # only add the current role when we're at the top level,
+ # otherwise we'll end up in a recursive loop
+ if level == 0:
+ self.included_roles.append(role)
+ dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
+ return dep_stack
+
+ def _load_role_vars_files(self, vars_files):
+ # process variables stored in vars/main.yml files
+ role_vars = {}
+ for filename in vars_files:
+ if os.path.exists(filename):
+ new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
+ if new_vars:
+ if type(new_vars) != dict:
+ raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
+ role_vars = utils.combine_vars(role_vars, new_vars)
+
+ return role_vars
+
+ def _load_role_defaults(self, defaults_files):
+ # process default variables
+ default_vars = {}
+ for filename in defaults_files:
+ if os.path.exists(filename):
+ new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
+ if new_default_vars:
+ if type(new_default_vars) != dict:
+ raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
+ default_vars = utils.combine_vars(default_vars, new_default_vars)
+
+ return default_vars
+
+ def _load_roles(self, roles, ds):
+ # a role is a name that auto-includes the following if they exist
+ # /tasks/main.yml
+ # /handlers/main.yml
+ # /vars/main.yml
+ # /library
+ # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
+
+ if roles is None:
+ roles = []
+ if type(roles) != list:
+ raise errors.AnsibleError("value of 'roles:' must be a list")
+
+ new_tasks = []
+ new_handlers = []
+ role_vars_files = []
+ defaults_files = []
+
+ pre_tasks = ds.get('pre_tasks', None)
+ if type(pre_tasks) != list:
+ pre_tasks = []
+ for x in pre_tasks:
+ new_tasks.append(x)
+
+ # flush handlers after pre_tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+
+ roles = self._build_role_dependencies(roles, [], {})
+
+ # give each role an uuid and
+ # make role_path available as variable to the task
+ for idx, val in enumerate(roles):
+ this_uuid = str(uuid.uuid4())
+ roles[idx][-3]['role_uuid'] = this_uuid
+ roles[idx][-3]['role_path'] = roles[idx][1]
+
+ role_names = []
+
+ for (role, role_path, role_vars, role_params, default_vars) in roles:
+ # special vars must be extracted from the dict to the included tasks
+ special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
+ special_vars = {}
+ for k in special_keys:
+ if k in role_vars:
+ special_vars[k] = role_vars[k]
+
+ task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
+ handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
+ vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
+ meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
+ defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
+
+ task = self._resolve_main(task_basepath)
+ handler = self._resolve_main(handler_basepath)
+ vars_file = self._resolve_main(vars_basepath)
+ meta_file = self._resolve_main(meta_basepath)
+ defaults_file = self._resolve_main(defaults_basepath)
+
+ library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
+
+ missing = lambda f: not os.path.isfile(f)
+ if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
+ raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
+
+ if isinstance(role, dict):
+ role_name = role['role']
+ else:
+ role_name = utils.role_spec_parse(role)["name"]
+
+ role_names.append(role_name)
+ if os.path.isfile(task):
+ nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
+ for k in special_keys:
+ if k in special_vars:
+ nt[k] = special_vars[k]
+ new_tasks.append(nt)
+ if os.path.isfile(handler):
+ nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
+ for k in special_keys:
+ if k in special_vars:
+ nt[k] = special_vars[k]
+ new_handlers.append(nt)
+ if os.path.isfile(vars_file):
+ role_vars_files.append(vars_file)
+ if os.path.isfile(defaults_file):
+ defaults_files.append(defaults_file)
+ if os.path.isdir(library):
+ utils.plugins.module_finder.add_directory(library)
+
+ tasks = ds.get('tasks', None)
+ post_tasks = ds.get('post_tasks', None)
+ handlers = ds.get('handlers', None)
+ vars_files = ds.get('vars_files', None)
+
+ if type(tasks) != list:
+ tasks = []
+ if type(handlers) != list:
+ handlers = []
+ if type(vars_files) != list:
+ vars_files = []
+ if type(post_tasks) != list:
+ post_tasks = []
+
+ new_tasks.extend(tasks)
+ # flush handlers after tasks + role tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+ new_tasks.extend(post_tasks)
+ # flush handlers after post tasks
+ new_tasks.append(dict(meta='flush_handlers'))
+
+ new_handlers.extend(handlers)
+
+ ds['tasks'] = new_tasks
+ ds['handlers'] = new_handlers
+ ds['role_names'] = role_names
+
+ self.role_vars = self._load_role_vars_files(role_vars_files)
+ self.default_vars = self._load_role_defaults(defaults_files)
+
+ return ds
+
+ # *************************************************
+
+ def _resolve_main(self, basepath):
+ ''' flexibly handle variations in main filenames '''
+ # these filenames are acceptable:
+ mains = (
+ os.path.join(basepath, 'main'),
+ os.path.join(basepath, 'main.yml'),
+ os.path.join(basepath, 'main.yaml'),
+ os.path.join(basepath, 'main.json'),
+ )
+ if sum([os.path.isfile(x) for x in mains]) > 1:
+ raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
+ else:
+ for m in mains:
+ if os.path.isfile(m):
+ return m # exactly one main file
+ return mains[0] # zero mains (we still need to return something)
+
+ # *************************************************
+
+ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
+ additional_conditions=None, original_file=None, role_name=None):
+ ''' handle task and handler include statements '''
+
+ results = []
+ if tasks is None:
+ # support empty handler files, and the like.
+ tasks = []
+ if additional_conditions is None:
+ additional_conditions = []
+ if vars is None:
+ vars = {}
+ if role_params is None:
+ role_params = {}
+ if default_vars is None:
+ default_vars = {}
+ if become_vars is None:
+ become_vars = {}
+
+ old_conditions = list(additional_conditions)
+
+ for x in tasks:
+
+ # prevent assigning the same conditions to each task on an include
+ included_additional_conditions = list(old_conditions)
+
+ if not isinstance(x, dict):
+ raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
+
+ # evaluate privilege escalation vars for current and child tasks
+ included_become_vars = {}
+ for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
+ if k in x:
+ included_become_vars[k] = x[k]
+ elif k in become_vars:
+ included_become_vars[k] = become_vars[k]
+ x[k] = become_vars[k]
+
+ task_vars = vars.copy()
+ if original_file:
+ task_vars['_original_file'] = original_file
+
+ if 'meta' in x:
+ if x['meta'] == 'flush_handlers':
+ if role_name and 'role_name' not in x:
+ x['role_name'] = role_name
+ results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
+ continue
+
+ if 'include' in x:
+ tokens = split_args(str(x['include']))
+ included_additional_conditions = list(additional_conditions)
+ include_vars = {}
+ for k in x:
+ if k.startswith("with_"):
+ if original_file:
+ offender = " (in %s)" % original_file
+ else:
+ offender = ""
+ utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
+ elif k.startswith("when_"):
+ utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
+ elif k == 'when':
+ if isinstance(x[k], (basestring, bool)):
+ included_additional_conditions.append(x[k])
+ elif type(x[k]) is list:
+ included_additional_conditions.extend(x[k])
+ elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
+ continue
+ else:
+ include_vars[k] = x[k]
+
+ # get any role parameters specified
+ role_params = x.get('role_params', {})
+
+ # get any role default variables specified
+ default_vars = x.get('default_vars', {})
+ if not default_vars:
+ default_vars = self.default_vars
+ else:
+ default_vars = utils.combine_vars(self.default_vars, default_vars)
+
+ # append the vars defined with the include (from above)
+ # as well as the old-style 'vars' element. The old-style
+ # vars are given higher precedence here (just in case)
+ task_vars = utils.combine_vars(task_vars, include_vars)
+ if 'vars' in x:
+ task_vars = utils.combine_vars(task_vars, x['vars'])
+
+ new_role = None
+ if 'role_name' in x:
+ new_role = x['role_name']
+
+ mv = task_vars.copy()
+ for t in tokens[1:]:
+ (k,v) = t.split("=", 1)
+ v = unquote(v)
+ mv[k] = template(self.basedir, v, mv)
+ dirname = self.basedir
+ if original_file:
+ dirname = os.path.dirname(original_file)
+
+ # temp vars are used here to avoid trampling on the existing vars structures
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, mv)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+ include_file = template(dirname, tokens[0], temp_vars)
+ include_filename = utils.path_dwim(dirname, include_file)
+
+ data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
+ if 'role_name' in x and data is not None:
+ for y in data:
+ if isinstance(y, dict) and 'include' in y:
+ y['role_name'] = new_role
+ loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
+ results += loaded
+ elif type(x) == dict:
+ task = Task(
+ self, x,
+ module_vars=task_vars,
+ play_vars=self.vars,
+ play_file_vars=self.vars_file_vars,
+ role_vars=self.role_vars,
+ role_params=role_params,
+ default_vars=default_vars,
+ additional_conditions=list(additional_conditions),
+ role_name=role_name
+ )
+ results.append(task)
+ else:
+ raise Exception("unexpected task type")
+
+ for x in results:
+ if self.tags is not None:
+ x.tags.extend(self.tags)
+
+ return results
+
+ # *************************************************
+
+ def tasks(self):
+ ''' return task objects for this play '''
+ return self._tasks
+
+ def handlers(self):
+ ''' return handler objects for this play '''
+ return self._handlers
+
+ # *************************************************
+
+ def _get_vars(self):
+ ''' load the vars section from a play, accounting for all sorts of variable features
+ including loading from yaml files, prompting, and conditional includes of the first
+ file found in a list. '''
+
+ if self.vars is None:
+ self.vars = {}
+
+ if type(self.vars) not in [dict, list]:
+ raise errors.AnsibleError("'vars' section must contain only key/value pairs")
+
+ vars = {}
+
+ # translate a list of vars into a dict
+ if type(self.vars) == list:
+ for item in self.vars:
+ if getattr(item, 'items', None) is None:
+ raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
+ k, v = item.items()[0]
+ vars[k] = v
+ else:
+ vars.update(self.vars)
+
+ if type(self.vars_prompt) == list:
+ for var in self.vars_prompt:
+ if not 'name' in var:
+ raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
+
+ vname = var['name']
+ prompt = var.get("prompt", vname)
+ default = var.get("default", None)
+ private = var.get("private", True)
+
+ confirm = var.get("confirm", False)
+ encrypt = var.get("encrypt", None)
+ salt_size = var.get("salt_size", None)
+ salt = var.get("salt", None)
+
+ if vname not in self.playbook.extra_vars:
+ vars[vname] = self.playbook.callbacks.on_vars_prompt(
+ vname, private, prompt, encrypt, confirm, salt_size, salt, default
+ )
+
+ elif type(self.vars_prompt) == dict:
+ for (vname, prompt) in self.vars_prompt.iteritems():
+ prompt_msg = "%s: " % prompt
+ if vname not in self.playbook.extra_vars:
+ vars[vname] = self.playbook.callbacks.on_vars_prompt(
+ varname=vname, private=False, prompt=prompt_msg, default=None
+ )
+
+ else:
+ raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
+
+ if type(self.playbook.extra_vars) == dict:
+ vars = utils.combine_vars(vars, self.playbook.extra_vars)
+
+ return vars
+
+ # *************************************************
+
+ def update_vars_files(self, hosts, vault_password=None):
+ ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
+
+ # now loop through all the hosts...
+ for h in hosts:
+ self._update_vars_files_for_host(h, vault_password=vault_password)
+
+ # *************************************************
+
+ def compare_tags(self, tags):
+ ''' given a list of tags that the user has specified, return two lists:
+ matched_tags: tags were found within the current play and match those given
+ by the user
+ unmatched_tags: tags that were found within the current play but do not match
+ any provided by the user '''
+
+ # gather all the tags in all the tasks and handlers into one list
+ # FIXME: isn't this in self.tags already?
+
+ all_tags = []
+ for task in self._tasks:
+ if not task.meta:
+ all_tags.extend(task.tags)
+ for handler in self._handlers:
+ all_tags.extend(handler.tags)
+
+ # compare the lists of tags using sets and return the matched and unmatched
+ all_tags_set = set(all_tags)
+ tags_set = set(tags)
+
+ matched_tags = all_tags_set.intersection(tags_set)
+ unmatched_tags = all_tags_set.difference(tags_set)
+
+ a = set(['always'])
+ u = set(['untagged'])
+ if 'always' in all_tags_set:
+ matched_tags = matched_tags.union(a)
+ unmatched_tags = all_tags_set.difference(a)
+
+ if 'all' in tags_set:
+ matched_tags = matched_tags.union(all_tags_set)
+ unmatched_tags = set()
+
+ if 'tagged' in tags_set:
+ matched_tags = all_tags_set.difference(u)
+ unmatched_tags = u
+
+ if 'untagged' in tags_set and 'untagged' in all_tags_set:
+ matched_tags = matched_tags.union(u)
+ unmatched_tags = unmatched_tags.difference(u)
+
+ return matched_tags, unmatched_tags
+
+ # *************************************************
+
+ def _late_merge_role_tags(self):
+ # build a local dict of tags for roles
+ role_tags = {}
+ for task in self._ds['tasks']:
+ if 'role_name' in task:
+ this_role = task['role_name'] + "-" + task['vars']['role_uuid']
+
+ if this_role not in role_tags:
+ role_tags[this_role] = []
+
+ if 'tags' in task['vars']:
+ if isinstance(task['vars']['tags'], basestring):
+ role_tags[this_role] += shlex.split(task['vars']['tags'])
+ else:
+ role_tags[this_role] += task['vars']['tags']
+
+ # apply each role's tags to its tasks
+ for idx, val in enumerate(self._tasks):
+ if getattr(val, 'role_name', None) is not None:
+ this_role = val.role_name + "-" + val.module_vars['role_uuid']
+ if this_role in role_tags:
+ self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
+
+ # *************************************************
+
+ def _update_vars_files_for_host(self, host, vault_password=None):
+
+ def generate_filenames(host, inject, filename):
+
+ """ Render the raw filename into 3 forms """
+
+ # filename2 is the templated version of the filename, which will
+ # be fully rendered if any variables contained within it are
+ # non-inventory related
+ filename2 = template(self.basedir, filename, self.vars)
+
+ # filename3 is the same as filename2, but when the host object is
+ # available, inventory variables will be expanded as well since the
+ # name is templated with the injected variables
+ filename3 = filename2
+ if host is not None:
+ filename3 = template(self.basedir, filename2, inject)
+
+ # filename4 is the dwim'd path, but may also be mixed-scope, so we use
+ # both play scoped vars and host scoped vars to template the filepath
+ if utils.contains_vars(filename3) and host is not None:
+ inject.update(self.vars)
+ filename4 = template(self.basedir, filename3, inject)
+ filename4 = utils.path_dwim(self.basedir, filename4)
+ else:
+ filename4 = utils.path_dwim(self.basedir, filename3)
+
+ return filename2, filename3, filename4
+
+
+ def update_vars_cache(host, data, target_filename=None):
+
+ """ update a host's varscache with new var data """
+
+ self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
+ if target_filename:
+ self.playbook.callbacks.on_import_for_host(host, target_filename)
+
+ def process_files(filename, filename2, filename3, filename4, host=None):
+
+ """ pseudo-algorithm for deciding where new vars should go """
+
+ data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
+ if data:
+ if type(data) != dict:
+ raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
+ if host is not None:
+ target_filename = None
+ if utils.contains_vars(filename2):
+ if not utils.contains_vars(filename3):
+ target_filename = filename3
+ else:
+ target_filename = filename4
+ update_vars_cache(host, data, target_filename=target_filename)
+ else:
+ self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
+ # we did process this file
+ return True
+ # we did not process this file
+ return False
+
+ # Enforce that vars_files is always a list
+ if type(self.vars_files) != list:
+ self.vars_files = [ self.vars_files ]
+
+ # Build an inject if this is a host run started by self.update_vars_files
+ if host is not None:
+ inject = {}
+ inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
+ inject.update(self.playbook.SETUP_CACHE.get(host, {}))
+ inject.update(self.playbook.VARS_CACHE.get(host, {}))
+ else:
+ inject = None
+
+ processed = []
+ for filename in self.vars_files:
+ if type(filename) == list:
+ # loop over all filenames, loading the first one, and failing if none found
+ found = False
+ sequence = []
+ for real_filename in filename:
+ filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
+ sequence.append(filename4)
+ if os.path.exists(filename4):
+ found = True
+ if process_files(filename, filename2, filename3, filename4, host=host):
+ processed.append(filename)
+ elif host is not None:
+ self.playbook.callbacks.on_not_import_for_host(host, filename4)
+ if found:
+ break
+ if not found and host is not None:
+ raise errors.AnsibleError(
+ "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
+ )
+ else:
+ # just one filename supplied, load it!
+ filename2, filename3, filename4 = generate_filenames(host, inject, filename)
+ if utils.contains_vars(filename4):
+ continue
+ if process_files(filename, filename2, filename3, filename4, host=host):
+ processed.append(filename)
+
+ return processed
diff --git a/v1/ansible/playbook/task.py b/v1/ansible/playbook/task.py
new file mode 100644
index 00000000000..70c1bc8df6b
--- /dev/null
+++ b/v1/ansible/playbook/task.py
@@ -0,0 +1,346 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from ansible import errors
+from ansible import utils
+from ansible.module_utils.splitter import split_args
+import os
+import ansible.utils.template as template
+import sys
+
+class Task(object):
+
+ _t_common = [
+ 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
+ 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
+ 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
+ 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
+ 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
+ ]
+
+ __slots__ = [
+ 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
+ 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
+ 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
+ ] + _t_common
+
+ # to prevent typos and such
+ VALID_KEYS = frozenset([
+ 'async', 'connection', 'include', 'poll',
+ ] + _t_common)
+
+ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
+ ''' constructor loads from a task or handler datastructure '''
+
+ # meta directives are used to tell things like ansible/playbook to run
+ # operations like handler execution. Meta tasks are not executed
+ # normally.
+ if 'meta' in ds:
+ self.meta = ds['meta']
+ self.tags = []
+ self.module_vars = module_vars
+ self.role_name = role_name
+ return
+ else:
+ self.meta = None
+
+
+ library = os.path.join(play.basedir, 'library')
+ if os.path.exists(library):
+ utils.plugins.module_finder.add_directory(library)
+
+ for x in ds.keys():
+
+ # code to allow for saying "modulename: args" versus "action: modulename args"
+ if x in utils.plugins.module_finder:
+
+ if 'action' in ds:
+ raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
+ if isinstance(ds[x], dict):
+ if 'args' in ds:
+ raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
+ ds['args'] = ds[x]
+ ds[x] = ''
+ elif ds[x] is None:
+ ds[x] = ''
+ if not isinstance(ds[x], basestring):
+ raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
+ ds['action'] = x + " " + ds[x]
+ ds.pop(x)
+
+ # code to allow "with_glob" and to reference a lookup plugin named glob
+ elif x.startswith("with_"):
+ if isinstance(ds[x], basestring):
+ param = ds[x].strip()
+
+ plugin_name = x.replace("with_","")
+ if plugin_name in utils.plugins.lookup_loader:
+ ds['items_lookup_plugin'] = plugin_name
+ ds['items_lookup_terms'] = ds[x]
+ ds.pop(x)
+ else:
+ raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
+
+ elif x in [ 'changed_when', 'failed_when', 'when']:
+ if isinstance(ds[x], basestring):
+ param = ds[x].strip()
+ # Only a variable, no logic
+ if (param.startswith('{{') and
+ param.find('}}') == len(ds[x]) - 2 and
+ param.find('|') == -1):
+ utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
+ elif x.startswith("when_"):
+ utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
+
+ if 'when' in ds:
+ raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
+ when_name = x.replace("when_","")
+ ds['when'] = "%s %s" % (when_name, ds[x])
+ ds.pop(x)
+ elif not x in Task.VALID_KEYS:
+ raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
+
+ self.module_vars = module_vars
+ self.play_vars = play_vars
+ self.play_file_vars = play_file_vars
+ self.role_vars = role_vars
+ self.role_params = role_params
+ self.default_vars = default_vars
+ self.play = play
+
+ # load various attributes
+ self.name = ds.get('name', None)
+ self.tags = [ 'untagged' ]
+ self.register = ds.get('register', None)
+ self.environment = ds.get('environment', play.environment)
+ self.role_name = role_name
+ self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
+ self.run_once = utils.boolean(ds.get('run_once', 'false'))
+
+ #Code to allow do until feature in a Task
+ if 'until' in ds:
+ if not ds.get('register'):
+ raise errors.AnsibleError("register keyword is mandatory when using do until feature")
+ self.module_vars['delay'] = ds.get('delay', 5)
+ self.module_vars['retries'] = ds.get('retries', 3)
+ self.module_vars['register'] = ds.get('register', None)
+ self.until = ds.get('until')
+ self.module_vars['until'] = self.until
+
+ # rather than simple key=value args on the options line, these represent structured data and the values
+ # can be hashes and lists, not just scalars
+ self.args = ds.get('args', {})
+
+ # get remote_user for task, then play, then playbook
+ if ds.get('remote_user') is not None:
+ self.remote_user = ds.get('remote_user')
+ elif ds.get('remote_user', play.remote_user) is not None:
+ self.remote_user = ds.get('remote_user', play.remote_user)
+ else:
+ self.remote_user = ds.get('remote_user', play.playbook.remote_user)
+
+ # Fail out if user specifies privilege escalation params in conflict
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
+ raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+            raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "su_pass" in task: %s' % self.name)
+
+ if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+ raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ self.become = utils.boolean(ds.get('become', play.become))
+ self.become_method = ds.get('become_method', play.become_method)
+ self.become_user = ds.get('become_user', play.become_user)
+ self.become_pass = ds.get('become_pass', play.playbook.become_pass)
+
+ # set only if passed in current task data
+ if 'sudo' in ds or 'sudo_user' in ds:
+ self.become_method='sudo'
+
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ del ds['sudo']
+ else:
+ self.become=True
+ if 'sudo_user' in ds:
+ self.become_user = ds['sudo_user']
+ del ds['sudo_user']
+ if 'sudo_pass' in ds:
+ self.become_pass = ds['sudo_pass']
+ del ds['sudo_pass']
+
+ elif 'su' in ds or 'su_user' in ds:
+ self.become_method='su'
+
+            if 'su' in ds:
+                self.become=ds['su']
+                del ds['su']
+            else:
+                self.become=True
+ if 'su_user' in ds:
+ self.become_user = ds['su_user']
+ del ds['su_user']
+ if 'su_pass' in ds:
+ self.become_pass = ds['su_pass']
+ del ds['su_pass']
+
+ # Both are defined
+ if ('action' in ds) and ('local_action' in ds):
+ raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
+ # Both are NOT defined
+ elif (not 'action' in ds) and (not 'local_action' in ds):
+ raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', ''))
+ # Only one of them is defined
+ elif 'local_action' in ds:
+ self.action = ds.get('local_action', '')
+ self.delegate_to = '127.0.0.1'
+ else:
+ self.action = ds.get('action', '')
+ self.delegate_to = ds.get('delegate_to', None)
+ self.transport = ds.get('connection', ds.get('transport', play.transport))
+
+ if isinstance(self.action, dict):
+ if 'module' not in self.action:
+ raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
+ if self.args:
+ raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
+ self.args = self.action
+ self.action = self.args.pop('module')
+
+ # delegate_to can use variables
+ if not (self.delegate_to is None):
+ # delegate_to: localhost should use local transport
+ if self.delegate_to in ['127.0.0.1', 'localhost']:
+ self.transport = 'local'
+
+ # notified by is used by Playbook code to flag which hosts
+ # need to run a notifier
+ self.notified_by = []
+
+ # if no name is specified, use the action line as the name
+ if self.name is None:
+ self.name = self.action
+
+ # load various attributes
+ self.when = ds.get('when', None)
+ self.changed_when = ds.get('changed_when', None)
+ self.failed_when = ds.get('failed_when', None)
+
+ # combine the default and module vars here for use in templating
+ all_vars = self.default_vars.copy()
+ all_vars = utils.combine_vars(all_vars, self.play_vars)
+ all_vars = utils.combine_vars(all_vars, self.play_file_vars)
+ all_vars = utils.combine_vars(all_vars, self.role_vars)
+ all_vars = utils.combine_vars(all_vars, self.module_vars)
+ all_vars = utils.combine_vars(all_vars, self.role_params)
+
+ self.async_seconds = ds.get('async', 0) # not async by default
+ self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
+ self.async_seconds = int(self.async_seconds)
+ self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
+ self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
+ self.async_poll_interval = int(self.async_poll_interval)
+ self.notify = ds.get('notify', [])
+ self.first_available_file = ds.get('first_available_file', None)
+
+ self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
+ self.items_lookup_terms = ds.get('items_lookup_terms', None)
+
+
+ self.ignore_errors = ds.get('ignore_errors', False)
+ self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
+
+ self.always_run = ds.get('always_run', False)
+
+ # action should be a string
+ if not isinstance(self.action, basestring):
+ raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
+
+ # notify can be a string or a list, store as a list
+ if isinstance(self.notify, basestring):
+ self.notify = [ self.notify ]
+
+ # split the action line into a module name + arguments
+ try:
+ tokens = split_args(self.action)
+ except Exception, e:
+ if "unbalanced" in str(e):
+ raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
+ "Make sure quotes are matched or escaped properly")
+ else:
+ raise
+ if len(tokens) < 1:
+ raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
+ self.module_name = tokens[0]
+ self.module_args = ''
+ if len(tokens) > 1:
+ self.module_args = " ".join(tokens[1:])
+
+ import_tags = self.module_vars.get('tags',[])
+ if type(import_tags) in [int,float]:
+ import_tags = str(import_tags)
+ elif type(import_tags) in [str,unicode]:
+ # allow the user to list comma delimited tags
+ import_tags = import_tags.split(",")
+
+ # handle mutually incompatible options
+ incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
+ if len(incompatibles) > 1:
+ raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
+
+ # make first_available_file accessible to Runner code
+ if self.first_available_file:
+ self.module_vars['first_available_file'] = self.first_available_file
+ # make sure that the 'item' variable is set when using
+ # first_available_file (issue #8220)
+ if 'item' not in self.module_vars:
+ self.module_vars['item'] = ''
+
+ if self.items_lookup_plugin is not None:
+ self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
+ self.module_vars['items_lookup_terms'] = self.items_lookup_terms
+
+ # allow runner to see delegate_to option
+ self.module_vars['delegate_to'] = self.delegate_to
+
+ # make some task attributes accessible to Runner code
+ self.module_vars['ignore_errors'] = self.ignore_errors
+ self.module_vars['register'] = self.register
+ self.module_vars['changed_when'] = self.changed_when
+ self.module_vars['failed_when'] = self.failed_when
+ self.module_vars['always_run'] = self.always_run
+
+ # tags allow certain parts of a playbook to be run without running the whole playbook
+ apply_tags = ds.get('tags', None)
+ if apply_tags is not None:
+ if type(apply_tags) in [ str, unicode ]:
+ self.tags.append(apply_tags)
+ elif type(apply_tags) in [ int, float ]:
+ self.tags.append(str(apply_tags))
+ elif type(apply_tags) == list:
+ self.tags.extend(apply_tags)
+ self.tags.extend(import_tags)
+
+ if len(self.tags) > 1:
+ self.tags.remove('untagged')
+
+ if additional_conditions:
+ new_conditions = additional_conditions[:]
+ if self.when:
+ new_conditions.append(self.when)
+ self.when = new_conditions
diff --git a/lib/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py
similarity index 100%
rename from lib/ansible/runner/__init__.py
rename to v1/ansible/runner/__init__.py
diff --git a/lib/ansible/runner/lookup_plugins/__init__.py b/v1/ansible/runner/action_plugins/__init__.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/__init__.py
rename to v1/ansible/runner/action_plugins/__init__.py
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/v1/ansible/runner/action_plugins/add_host.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/add_host.py
rename to v1/ansible/runner/action_plugins/add_host.py
diff --git a/lib/ansible/runner/action_plugins/assemble.py b/v1/ansible/runner/action_plugins/assemble.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/assemble.py
rename to v1/ansible/runner/action_plugins/assemble.py
diff --git a/lib/ansible/runner/action_plugins/assert.py b/v1/ansible/runner/action_plugins/assert.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/assert.py
rename to v1/ansible/runner/action_plugins/assert.py
diff --git a/lib/ansible/runner/action_plugins/async.py b/v1/ansible/runner/action_plugins/async.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/async.py
rename to v1/ansible/runner/action_plugins/async.py
diff --git a/lib/ansible/runner/action_plugins/copy.py b/v1/ansible/runner/action_plugins/copy.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/copy.py
rename to v1/ansible/runner/action_plugins/copy.py
diff --git a/lib/ansible/runner/action_plugins/debug.py b/v1/ansible/runner/action_plugins/debug.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/debug.py
rename to v1/ansible/runner/action_plugins/debug.py
diff --git a/lib/ansible/runner/action_plugins/fail.py b/v1/ansible/runner/action_plugins/fail.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/fail.py
rename to v1/ansible/runner/action_plugins/fail.py
diff --git a/lib/ansible/runner/action_plugins/fetch.py b/v1/ansible/runner/action_plugins/fetch.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/fetch.py
rename to v1/ansible/runner/action_plugins/fetch.py
diff --git a/lib/ansible/runner/action_plugins/group_by.py b/v1/ansible/runner/action_plugins/group_by.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/group_by.py
rename to v1/ansible/runner/action_plugins/group_by.py
diff --git a/lib/ansible/runner/action_plugins/include_vars.py b/v1/ansible/runner/action_plugins/include_vars.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/include_vars.py
rename to v1/ansible/runner/action_plugins/include_vars.py
diff --git a/lib/ansible/runner/action_plugins/normal.py b/v1/ansible/runner/action_plugins/normal.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/normal.py
rename to v1/ansible/runner/action_plugins/normal.py
diff --git a/lib/ansible/runner/action_plugins/patch.py b/v1/ansible/runner/action_plugins/patch.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/patch.py
rename to v1/ansible/runner/action_plugins/patch.py
diff --git a/lib/ansible/runner/action_plugins/pause.py b/v1/ansible/runner/action_plugins/pause.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/pause.py
rename to v1/ansible/runner/action_plugins/pause.py
diff --git a/lib/ansible/runner/action_plugins/raw.py b/v1/ansible/runner/action_plugins/raw.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/raw.py
rename to v1/ansible/runner/action_plugins/raw.py
diff --git a/lib/ansible/runner/action_plugins/script.py b/v1/ansible/runner/action_plugins/script.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/script.py
rename to v1/ansible/runner/action_plugins/script.py
diff --git a/lib/ansible/runner/action_plugins/set_fact.py b/v1/ansible/runner/action_plugins/set_fact.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/set_fact.py
rename to v1/ansible/runner/action_plugins/set_fact.py
diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/v1/ansible/runner/action_plugins/synchronize.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/synchronize.py
rename to v1/ansible/runner/action_plugins/synchronize.py
diff --git a/lib/ansible/runner/action_plugins/template.py b/v1/ansible/runner/action_plugins/template.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/template.py
rename to v1/ansible/runner/action_plugins/template.py
diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/v1/ansible/runner/action_plugins/unarchive.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/unarchive.py
rename to v1/ansible/runner/action_plugins/unarchive.py
diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/v1/ansible/runner/action_plugins/win_copy.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/win_copy.py
rename to v1/ansible/runner/action_plugins/win_copy.py
diff --git a/lib/ansible/runner/action_plugins/win_template.py b/v1/ansible/runner/action_plugins/win_template.py
similarity index 100%
rename from lib/ansible/runner/action_plugins/win_template.py
rename to v1/ansible/runner/action_plugins/win_template.py
diff --git a/lib/ansible/runner/connection.py b/v1/ansible/runner/connection.py
similarity index 100%
rename from lib/ansible/runner/connection.py
rename to v1/ansible/runner/connection.py
diff --git a/lib/ansible/runner/shell_plugins/__init__.py b/v1/ansible/runner/connection_plugins/__init__.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/__init__.py
rename to v1/ansible/runner/connection_plugins/__init__.py
diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/v1/ansible/runner/connection_plugins/accelerate.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/accelerate.py
rename to v1/ansible/runner/connection_plugins/accelerate.py
diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/v1/ansible/runner/connection_plugins/chroot.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/chroot.py
rename to v1/ansible/runner/connection_plugins/chroot.py
diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/v1/ansible/runner/connection_plugins/fireball.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/fireball.py
rename to v1/ansible/runner/connection_plugins/fireball.py
diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/v1/ansible/runner/connection_plugins/funcd.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/funcd.py
rename to v1/ansible/runner/connection_plugins/funcd.py
diff --git a/lib/ansible/runner/connection_plugins/jail.py b/v1/ansible/runner/connection_plugins/jail.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/jail.py
rename to v1/ansible/runner/connection_plugins/jail.py
diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/v1/ansible/runner/connection_plugins/libvirt_lxc.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/libvirt_lxc.py
rename to v1/ansible/runner/connection_plugins/libvirt_lxc.py
diff --git a/lib/ansible/runner/connection_plugins/local.py b/v1/ansible/runner/connection_plugins/local.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/local.py
rename to v1/ansible/runner/connection_plugins/local.py
diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/v1/ansible/runner/connection_plugins/paramiko_ssh.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/paramiko_ssh.py
rename to v1/ansible/runner/connection_plugins/paramiko_ssh.py
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/ssh.py
rename to v1/ansible/runner/connection_plugins/ssh.py
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/v1/ansible/runner/connection_plugins/winrm.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/winrm.py
rename to v1/ansible/runner/connection_plugins/winrm.py
diff --git a/lib/ansible/runner/connection_plugins/zone.py b/v1/ansible/runner/connection_plugins/zone.py
similarity index 100%
rename from lib/ansible/runner/connection_plugins/zone.py
rename to v1/ansible/runner/connection_plugins/zone.py
diff --git a/lib/ansible/utils/module_docs_fragments/__init__.py b/v1/ansible/runner/filter_plugins/__init__.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/__init__.py
rename to v1/ansible/runner/filter_plugins/__init__.py
diff --git a/lib/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/core.py
rename to v1/ansible/runner/filter_plugins/core.py
diff --git a/lib/ansible/runner/filter_plugins/ipaddr.py b/v1/ansible/runner/filter_plugins/ipaddr.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/ipaddr.py
rename to v1/ansible/runner/filter_plugins/ipaddr.py
diff --git a/lib/ansible/runner/filter_plugins/mathstuff.py b/v1/ansible/runner/filter_plugins/mathstuff.py
similarity index 100%
rename from lib/ansible/runner/filter_plugins/mathstuff.py
rename to v1/ansible/runner/filter_plugins/mathstuff.py
diff --git a/v2/ansible/inventory/vars_plugins/__init__.py b/v1/ansible/runner/lookup_plugins/__init__.py
similarity index 100%
rename from v2/ansible/inventory/vars_plugins/__init__.py
rename to v1/ansible/runner/lookup_plugins/__init__.py
diff --git a/lib/ansible/runner/lookup_plugins/cartesian.py b/v1/ansible/runner/lookup_plugins/cartesian.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/cartesian.py
rename to v1/ansible/runner/lookup_plugins/cartesian.py
diff --git a/lib/ansible/runner/lookup_plugins/consul_kv.py b/v1/ansible/runner/lookup_plugins/consul_kv.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/consul_kv.py
rename to v1/ansible/runner/lookup_plugins/consul_kv.py
diff --git a/lib/ansible/runner/lookup_plugins/csvfile.py b/v1/ansible/runner/lookup_plugins/csvfile.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/csvfile.py
rename to v1/ansible/runner/lookup_plugins/csvfile.py
diff --git a/lib/ansible/runner/lookup_plugins/dict.py b/v1/ansible/runner/lookup_plugins/dict.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dict.py
rename to v1/ansible/runner/lookup_plugins/dict.py
diff --git a/lib/ansible/runner/lookup_plugins/dig.py b/v1/ansible/runner/lookup_plugins/dig.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dig.py
rename to v1/ansible/runner/lookup_plugins/dig.py
diff --git a/lib/ansible/runner/lookup_plugins/dnstxt.py b/v1/ansible/runner/lookup_plugins/dnstxt.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/dnstxt.py
rename to v1/ansible/runner/lookup_plugins/dnstxt.py
diff --git a/lib/ansible/runner/lookup_plugins/env.py b/v1/ansible/runner/lookup_plugins/env.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/env.py
rename to v1/ansible/runner/lookup_plugins/env.py
diff --git a/lib/ansible/runner/lookup_plugins/etcd.py b/v1/ansible/runner/lookup_plugins/etcd.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/etcd.py
rename to v1/ansible/runner/lookup_plugins/etcd.py
diff --git a/lib/ansible/runner/lookup_plugins/file.py b/v1/ansible/runner/lookup_plugins/file.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/file.py
rename to v1/ansible/runner/lookup_plugins/file.py
diff --git a/lib/ansible/runner/lookup_plugins/fileglob.py b/v1/ansible/runner/lookup_plugins/fileglob.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/fileglob.py
rename to v1/ansible/runner/lookup_plugins/fileglob.py
diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/v1/ansible/runner/lookup_plugins/first_found.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/first_found.py
rename to v1/ansible/runner/lookup_plugins/first_found.py
diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/v1/ansible/runner/lookup_plugins/flattened.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/flattened.py
rename to v1/ansible/runner/lookup_plugins/flattened.py
diff --git a/lib/ansible/runner/lookup_plugins/indexed_items.py b/v1/ansible/runner/lookup_plugins/indexed_items.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/indexed_items.py
rename to v1/ansible/runner/lookup_plugins/indexed_items.py
diff --git a/lib/ansible/runner/lookup_plugins/inventory_hostnames.py b/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/inventory_hostnames.py
rename to v1/ansible/runner/lookup_plugins/inventory_hostnames.py
diff --git a/lib/ansible/runner/lookup_plugins/items.py b/v1/ansible/runner/lookup_plugins/items.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/items.py
rename to v1/ansible/runner/lookup_plugins/items.py
diff --git a/lib/ansible/runner/lookup_plugins/lines.py b/v1/ansible/runner/lookup_plugins/lines.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/lines.py
rename to v1/ansible/runner/lookup_plugins/lines.py
diff --git a/lib/ansible/runner/lookup_plugins/nested.py b/v1/ansible/runner/lookup_plugins/nested.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/nested.py
rename to v1/ansible/runner/lookup_plugins/nested.py
diff --git a/lib/ansible/runner/lookup_plugins/password.py b/v1/ansible/runner/lookup_plugins/password.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/password.py
rename to v1/ansible/runner/lookup_plugins/password.py
diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/v1/ansible/runner/lookup_plugins/pipe.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/pipe.py
rename to v1/ansible/runner/lookup_plugins/pipe.py
diff --git a/lib/ansible/runner/lookup_plugins/random_choice.py b/v1/ansible/runner/lookup_plugins/random_choice.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/random_choice.py
rename to v1/ansible/runner/lookup_plugins/random_choice.py
diff --git a/lib/ansible/runner/lookup_plugins/redis_kv.py b/v1/ansible/runner/lookup_plugins/redis_kv.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/redis_kv.py
rename to v1/ansible/runner/lookup_plugins/redis_kv.py
diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/v1/ansible/runner/lookup_plugins/sequence.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/sequence.py
rename to v1/ansible/runner/lookup_plugins/sequence.py
diff --git a/lib/ansible/runner/lookup_plugins/subelements.py b/v1/ansible/runner/lookup_plugins/subelements.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/subelements.py
rename to v1/ansible/runner/lookup_plugins/subelements.py
diff --git a/lib/ansible/runner/lookup_plugins/template.py b/v1/ansible/runner/lookup_plugins/template.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/template.py
rename to v1/ansible/runner/lookup_plugins/template.py
diff --git a/lib/ansible/runner/lookup_plugins/together.py b/v1/ansible/runner/lookup_plugins/together.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/together.py
rename to v1/ansible/runner/lookup_plugins/together.py
diff --git a/lib/ansible/runner/lookup_plugins/url.py b/v1/ansible/runner/lookup_plugins/url.py
similarity index 100%
rename from lib/ansible/runner/lookup_plugins/url.py
rename to v1/ansible/runner/lookup_plugins/url.py
diff --git a/lib/ansible/runner/poller.py b/v1/ansible/runner/poller.py
similarity index 100%
rename from lib/ansible/runner/poller.py
rename to v1/ansible/runner/poller.py
diff --git a/lib/ansible/runner/return_data.py b/v1/ansible/runner/return_data.py
similarity index 100%
rename from lib/ansible/runner/return_data.py
rename to v1/ansible/runner/return_data.py
diff --git a/v2/test/parsing/yaml/__init__.py b/v1/ansible/runner/shell_plugins/__init__.py
similarity index 100%
rename from v2/test/parsing/yaml/__init__.py
rename to v1/ansible/runner/shell_plugins/__init__.py
diff --git a/lib/ansible/runner/shell_plugins/csh.py b/v1/ansible/runner/shell_plugins/csh.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/csh.py
rename to v1/ansible/runner/shell_plugins/csh.py
diff --git a/lib/ansible/runner/shell_plugins/fish.py b/v1/ansible/runner/shell_plugins/fish.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/fish.py
rename to v1/ansible/runner/shell_plugins/fish.py
diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/v1/ansible/runner/shell_plugins/powershell.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/powershell.py
rename to v1/ansible/runner/shell_plugins/powershell.py
diff --git a/lib/ansible/runner/shell_plugins/sh.py b/v1/ansible/runner/shell_plugins/sh.py
similarity index 100%
rename from lib/ansible/runner/shell_plugins/sh.py
rename to v1/ansible/runner/shell_plugins/sh.py
diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py
new file mode 100644
index 00000000000..7ed07a54c84
--- /dev/null
+++ b/v1/ansible/utils/__init__.py
@@ -0,0 +1,1660 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+import errno
+import sys
+import re
+import os
+import shlex
+import yaml
+import copy
+import optparse
+import operator
+from ansible import errors
+from ansible import __version__
+from ansible.utils.display_functions import *
+from ansible.utils.plugins import *
+from ansible.utils.su_prompts import *
+from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
+from ansible.callbacks import display
+from ansible.module_utils.splitter import split_args, unquote
+from ansible.module_utils.basic import heuristic_log_sanitize
+from ansible.utils.unicode import to_bytes, to_unicode
+import ansible.constants as C
+import ast
+import time
+import StringIO
+import stat
+import termios
+import tty
+import pipes
+import random
+import difflib
+import warnings
+import traceback
+import getpass
+import sys
+import subprocess
+import contextlib
+
+from vault import VaultLib
+
+# Global verbosity level; bumped once per -v flag by the increment_debug()
+# optparse callback below.
+VERBOSITY=0
+
+# Files larger than this (1 MiB) are not diffed, to keep --diff output sane.
+MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
+
+# caching the compilation of the regex used
+# to check for lookup calls within data
+LOOKUP_REGEX = re.compile(r'lookup\s*\(')
+PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
+CODE_REGEX = re.compile(r'(?:{%|%})')
+
+
+try:
+    # simplejson can be much faster if it's available
+    import simplejson as json
+except ImportError:
+    import json
+
+# Prefer the C-accelerated YAML loader (libyaml) when it is available.
+try:
+    from yaml import CSafeLoader as Loader
+except ImportError:
+    from yaml import SafeLoader as Loader
+
+# passlib is optional; callers must check PASSLIB_AVAILABLE before using it.
+PASSLIB_AVAILABLE = False
+try:
+    import passlib.hash
+    PASSLIB_AVAILABLE = True
+except:
+    pass
+
+# NOTE(review): 'builtin' is not a standard module name on Python 2 or 3
+# ('__builtin__' is the py2 name, 'builtins' the py3 name), so this always
+# takes the ImportError fallback in practice -- confirm intent.
+try:
+    import builtin
+except ImportError:
+    import __builtin__ as builtin
+
+# keyczar is optional; only required for accelerated/fireball modes
+# (see key_for_hostname below, which checks KEYCZAR_AVAILABLE).
+KEYCZAR_AVAILABLE=False
+try:
+    try:
+        # some versions of pycrypto may not have this?
+        from Crypto.pct_warnings import PowmInsecureWarning
+    except ImportError:
+        PowmInsecureWarning = RuntimeWarning
+
+    # Surface the insecure-gmp warning once as a user-visible message, then
+    # suppress it and import keyczar anyway.
+    with warnings.catch_warnings(record=True) as warning_handler:
+        warnings.simplefilter("error", PowmInsecureWarning)
+        try:
+            import keyczar.errors as key_errors
+            from keyczar.keys import AesKey
+        except PowmInsecureWarning:
+            system_warning(
+                "The version of gmp you have installed has a known issue regarding " + \
+                "timing vulnerabilities when used with pycrypto. " + \
+                "If possible, you should update it (i.e. yum update gmp)."
+            )
+            warnings.resetwarnings()
+            warnings.simplefilter("ignore")
+            import keyczar.errors as key_errors
+            from keyczar.keys import AesKey
+    KEYCZAR_AVAILABLE=True
+except ImportError:
+    pass
+
+
+###############################################################
+# Abstractions around keyczar
+###############################################################
+
+def key_for_hostname(hostname):
+    '''Return (creating if needed) the per-host AES key used by accelerated mode.
+
+    Keys live under C.ACCELERATE_KEYS_DIR, one file per hostname, and are
+    regenerated every 2 hours.  Raises AnsibleError if keyczar is missing or
+    the key directory/file permissions are looser than configured.
+    '''
+    # fireball mode is an implementation of ansible firing up zeromq via SSH
+    # to use no persistent daemons or key management
+
+    if not KEYCZAR_AVAILABLE:
+        raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
+
+    # Make sure the key directory exists with the configured (restrictive) mode.
+    key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
+    if not os.path.exists(key_path):
+        os.makedirs(key_path, mode=0700)
+        os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
+    elif not os.path.isdir(key_path):
+        raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
+
+    # Refuse to use a key directory whose mode differs from the configured one.
+    if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
+        raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
+
+    key_path = os.path.join(key_path, hostname)
+
+    # use new AES keys every 2 hours, which means fireball must not allow running for longer either
+    if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
+        key = AesKey.Generate()
+        # os.open lets us create the file with the restrictive mode atomically.
+        fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
+        fh = os.fdopen(fd, 'w')
+        fh.write(str(key))
+        fh.close()
+        return key
+    else:
+        if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
+            raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
+        fh = open(key_path)
+        key = AesKey.Read(fh.read())
+        fh.close()
+        return key
+
+def encrypt(key, msg):
+    '''Encrypt msg with the given keyczar AesKey.'''
+    return key.Encrypt(msg)
+
+def decrypt(key, msg):
+    '''Decrypt msg with the given keyczar AesKey.
+
+    Raises AnsibleError if the ciphertext signature does not verify.
+    '''
+    try:
+        return key.Decrypt(msg)
+    except key_errors.InvalidSignatureError:
+        raise errors.AnsibleError("decryption failed")
+
+###############################################################
+# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
+###############################################################
+
+def read_vault_file(vault_password_file):
+ """Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
+ if vault_password_file:
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError, e:
+ raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError), e:
+ raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
+
+ return vault_pass
+ else:
+ return None
+
+def err(msg):
+    ''' print an error message to stderr '''
+
+    print >> sys.stderr, msg
+
+def exit(msg, rc=1):
+    ''' quit with an error to stdout and a failure code '''
+
+    # NOTE: shadows the builtin exit(); message actually goes to stderr via err().
+    err(msg)
+    sys.exit(rc)
+
+def jsonify(result, format=False):
+    ''' format JSON output (compressed or uncompressed) '''
+
+    if result is None:
+        return "{}"
+    result2 = result.copy()
+    # Decode py2 byte strings so json.dumps with ensure_ascii=False works.
+    for key, value in result2.items():
+        if type(value) is str:
+            result2[key] = value.decode('utf-8', 'ignore')
+
+    # format=True pretty-prints with 4-space indentation.
+    indent = None
+    if format:
+        indent = 4
+
+    try:
+        return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
+    except UnicodeDecodeError:
+        # fall back to ASCII-escaped output if some value is not valid UTF-8
+        return json.dumps(result2, sort_keys=True, indent=indent)
+
+def write_tree_file(tree, hostname, buf):
+    ''' write something into treedir/hostname '''
+
+    # TODO: might be nice to append playbook runs per host in a similar way
+    # in which case, we'd want append mode.
+    path = os.path.join(tree, hostname)
+    fd = open(path, "w+")
+    fd.write(buf)
+    fd.close()
+
+def is_failed(result):
+    ''' is a given JSON result a failed result? '''
+
+    # Either a nonzero return code or a truthy 'failed' flag counts as failure;
+    # string forms 'True'/'true' are accepted since module output is untyped.
+    return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
+
+def is_changed(result):
+    ''' is a given JSON result a changed result? '''
+
+    return (result.get('changed', False) in [ True, 'True', 'true'])
+
+def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
+    '''Evaluate a when:-style conditional against the inject variables.
+
+    conditional may be None/'' (always True), a list (implicit AND), a
+    non-string (returned as-is), or a Jinja2 expression string.
+    '''
+    from ansible.utils import template
+
+    if conditional is None or conditional == '':
+        return True
+
+    # a list of conditionals is an implicit AND: all must hold
+    if isinstance(conditional, list):
+        for x in conditional:
+            if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
+                return False
+        return True
+
+    if not isinstance(conditional, basestring):
+        return conditional
+
+    conditional = conditional.replace("jinja2_compare ","")
+    # allow variable names
+    if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
+        conditional = to_unicode(inject[conditional], nonstring='simplerepr')
+    conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
+    original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
+    # a Jinja2 evaluation that results in something Python can eval!
+    presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+    conditional = template.template(basedir, presented, inject)
+    val = conditional.strip()
+    if val == presented:
+        # the templating failed, meaning most likely a
+        # variable was undefined. If we happened to be
+        # looking for an undefined variable, return True,
+        # otherwise fail
+        if "is undefined" in conditional:
+            return True
+        elif "is defined" in conditional:
+            return False
+        else:
+            raise errors.AnsibleError("error while evaluating conditional: %s" % original)
+    elif val == "True":
+        return True
+    elif val == "False":
+        return False
+    else:
+        raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
+
+def is_executable(path):
+    '''is the given path executable?'''
+    # true if any of the user/group/other execute bits is set
+    return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
+            or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
+            or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
+
+def unfrackpath(path):
+    '''
+    returns a path that is free of symlinks, environment
+    variables, relative path traversals and symbols (~)
+    example:
+    '$HOME/../../var/mail' becomes '/var/spool/mail'
+    '''
+    # order matters: expand ~ and $VARS first, then resolve symlinks, then normalize
+    return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
+
+def prepare_writeable_dir(tree,mode=0777):
+    ''' make sure a directory exists and is writeable '''
+
+    # modify the mode to ensure the owner at least
+    # has read/write access to this directory
+    mode |= 0700
+
+    # make sure the tree path is always expanded
+    # and normalized and free of symlinks
+    tree = unfrackpath(tree)
+
+    if not os.path.exists(tree):
+        try:
+            os.makedirs(tree, mode)
+        except (IOError, OSError), e:
+            raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
+    if not os.access(tree, os.W_OK):
+        raise errors.AnsibleError("Cannot write to path %s" % tree)
+    # returns the normalized path so callers use the same form we checked
+    return tree
+
+def path_dwim(basedir, given):
+    '''
+    make relative paths work like folks expect:
+    absolute and ~ paths are used as-is, anything else is taken
+    relative to basedir (defaulting to the current directory).
+    '''
+
+    # strip a surrounding pair of single quotes if present
+    if given.startswith("'"):
+        given = given[1:-1]
+
+    if given.startswith("/"):
+        return os.path.abspath(given)
+    elif given.startswith("~"):
+        return os.path.abspath(os.path.expanduser(given))
+    else:
+        if basedir is None:
+            basedir = "."
+        return os.path.abspath(os.path.join(basedir, given))
+
+def path_dwim_relative(original, dirname, source, playbook_base, check=True):
+    ''' find one file in a directory one level up in a dir named dirname relative to current '''
+    # (used by roles code)
+
+    from ansible.utils import template
+
+
+    basedir = os.path.dirname(original)
+    # if the basedir is a symlink, resolve it first so '..' behaves as expected
+    if os.path.islink(basedir):
+        basedir = unfrackpath(basedir)
+        template2 = os.path.join(basedir, dirname, source)
+    else:
+        template2 = os.path.join(basedir, '..', dirname, source)
+    source2 = path_dwim(basedir, template2)
+    if os.path.exists(source2):
+        return source2
+    # fall back to a path relative to the playbook itself
+    obvious_local_path = path_dwim(playbook_base, source)
+    if os.path.exists(obvious_local_path):
+        return obvious_local_path
+    if check:
+        raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
+    return source2 # which does not exist
+
+def repo_url_to_role_name(repo_url):
+    # gets the role name out of a repo like
+    # http://git.example.com/repos/repo.git" => "repo"
+
+    # a plain word with no URL syntax is already a role name
+    if '://' not in repo_url and '@' not in repo_url:
+        return repo_url
+    trailing_path = repo_url.split('/')[-1]
+    if trailing_path.endswith('.git'):
+        trailing_path = trailing_path[:-4]
+    if trailing_path.endswith('.tar.gz'):
+        trailing_path = trailing_path[:-7]
+    # drop any ',version' suffix that followed the URL
+    if ',' in trailing_path:
+        trailing_path = trailing_path.split(',')[0]
+    return trailing_path
+
+
+def role_spec_parse(role_spec):
+    # takes a repo and a version like
+    # git+http://git.example.com/repos/repo.git,v1.0
+    # and returns a list of properties such as:
+    # {
+    #   'scm': 'git',
+    #   'src': 'http://git.example.com/repos/repo.git',
+    #   'version': 'v1.0',
+    #   'name': 'repo'
+    # }
+    # NOTE(review): empty/comment lines return a 4-tuple of Nones while every
+    # other path returns a dict -- callers must handle both shapes; confirm
+    # this asymmetry is intentional before changing it.
+
+    role_spec = role_spec.strip()
+    role_version = ''
+    default_role_versions = dict(git='master', hg='tip')
+    if role_spec == "" or role_spec.startswith("#"):
+        return (None, None, None, None)
+
+    tokens = [s.strip() for s in role_spec.split(',')]
+
+    # assume https://github.com URLs are git+https:// URLs and not
+    # tarballs unless they end in '.zip'
+    if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
+        tokens[0] = 'git+' + tokens[0]
+
+    # 'scm+url' prefix selects the source control tool
+    if '+' in tokens[0]:
+        (scm, role_url) = tokens[0].split('+')
+    else:
+        scm = None
+        role_url = tokens[0]
+    if len(tokens) >= 2:
+        role_version = tokens[1]
+    if len(tokens) == 3:
+        role_name = tokens[2]
+    else:
+        role_name = repo_url_to_role_name(tokens[0])
+    if scm and not role_version:
+        role_version = default_role_versions.get(scm, '')
+    return dict(scm=scm, src=role_url, version=role_version, name=role_name)
+
+
+def role_yaml_parse(role):
+    '''Normalize a role dict (old 'role:' or new 'src:' style) in place,
+    filling in scm/src/version/name keys, and return it.'''
+    if 'role' in role:
+        # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
+        role_info = role_spec_parse(role['role'])
+        if isinstance(role_info, dict):
+            # Warning: Slight change in behaviour here.  name may be being
+            # overloaded.  Previously, name was only a parameter to the role.
+            # Now it is both a parameter to the role and the name that
+            # ansible-galaxy will install under on the local system.
+            if 'name' in role and 'name' in role_info:
+                del role_info['name']
+            role.update(role_info)
+    else:
+        # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
+        if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
+            role["src"] = "git+" + role["src"]
+
+        if '+' in role["src"]:
+            (scm, src) = role["src"].split('+')
+            role["scm"] = scm
+            role["src"] = src
+
+        if 'name' not in role:
+            role["name"] = repo_url_to_role_name(role["src"])
+
+        if 'version' not in role:
+            role['version'] = ''
+
+        if 'scm' not in role:
+            role['scm'] = None
+
+    return role
+
+
+def json_loads(data):
+    ''' parse a JSON string and return a data structure '''
+    try:
+        loaded = json.loads(data)
+    except ValueError,e:
+        # re-raise as an AnsibleError so callers get a consistent error type
+        raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
+
+    return loaded
+
+def _clean_data(orig_data, from_remote=False, from_inventory=False):
+    ''' remove jinja2 template tags from a string
+
+    Matched pairs of template delimiters are rewritten in place to the
+    Jinja2 comment delimiters {# ... #} so they are not re-templated.
+    '''
+
+    if not isinstance(orig_data, basestring):
+        return orig_data
+
+    # when the data is marked as having come from a remote, we always
+    # replace any print blocks (ie. {{var}}), however when marked as coming
+    # from inventory we only replace print blocks that contain a call to
+    # a lookup plugin (ie. {{lookup('foo','bar'))}})
+    replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
+
+    regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
+
+    # StringIO lets us overwrite 2-character delimiters in place by seeking,
+    # which works because '{#'/'#}' are the same width as '{{', '%}', etc.
+    with contextlib.closing(StringIO.StringIO(orig_data)) as data:
+        # these variables keep track of opening block locations, as we only
+        # want to replace matched pairs of print/block tags
+        print_openings = []
+        block_openings = []
+        for mo in regex.finditer(orig_data):
+            token = mo.group(0)
+            token_start = mo.start(0)
+
+            if token[0] == '{':
+                if token == '{%':
+                    block_openings.append(token_start)
+                elif token == '{{':
+                    print_openings.append(token_start)
+
+            elif token[1] == '}':
+                prev_idx = None
+                if token == '%}' and block_openings:
+                    prev_idx = block_openings.pop()
+                elif token == '}}' and print_openings:
+                    prev_idx = print_openings.pop()
+
+                if prev_idx is not None:
+                    # replace the opening
+                    data.seek(prev_idx, os.SEEK_SET)
+                    data.write('{#')
+                    # replace the closing
+                    data.seek(token_start, os.SEEK_SET)
+                    data.write('#}')
+
+            else:
+                assert False, 'Unhandled regex match'
+
+        return data.getvalue()
+
+def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
+ '''
+ walk a complex data structure, and use _clean_data() to
+ remove any template tags that may exist
+ '''
+ if not from_remote and not from_inventory:
+ raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
+ if isinstance(orig_data, dict):
+ data = orig_data.copy()
+ for key in data:
+ new_key = _clean_data_struct(key, from_remote, from_inventory)
+ new_val = _clean_data_struct(data[key], from_remote, from_inventory)
+ if key != new_key:
+ del data[key]
+ data[new_key] = new_val
+ elif isinstance(orig_data, list):
+ data = orig_data[:]
+ for i in range(0, len(data)):
+ data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
+ elif isinstance(orig_data, basestring):
+ data = _clean_data(orig_data, from_remote, from_inventory)
+ else:
+ data = orig_data
+ return data
+
+def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
+    ''' this version for module return data only
+
+    With no_exceptions=True a parse failure yields a failed-result dict
+    instead of raising.
+    '''
+
+    orig_data = raw_data
+
+    # ignore stuff like tcgetattr spewage or other warnings
+    data = filter_leading_non_json_lines(raw_data)
+
+    try:
+        results = json.loads(data)
+    except:
+        if no_exceptions:
+            return dict(failed=True, parsed=False, msg=raw_data)
+        else:
+            raise
+
+    # strip template tags out of remote results so they are not re-templated
+    if from_remote:
+        results = _clean_data_struct(results, from_remote, from_inventory)
+
+    return results
+
+def serialize_args(args):
+    '''
+    Flattens a dictionary args to a k=v string
+    '''
+    # NOTE(review): values that are neither strings nor bools (e.g. ints,
+    # lists) are silently dropped here -- confirm callers never pass them.
+    module_args = ""
+    for (k,v) in args.iteritems():
+        if isinstance(v, basestring):
+            module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
+        elif isinstance(v, bool):
+            module_args = "%s=%s %s" % (k, str(v), module_args)
+    return module_args.strip()
+
+def merge_module_args(current_args, new_args):
+    '''
+    merges either a dictionary or string of k=v pairs with another string of k=v pairs,
+    and returns a new k=v string without duplicates.  Keys in new_args win.
+    '''
+    if not isinstance(current_args, basestring):
+        raise errors.AnsibleError("expected current_args to be a basestring")
+    # we use parse_kv to split up the current args into a dictionary
+    final_args = parse_kv(current_args)
+    if isinstance(new_args, dict):
+        final_args.update(new_args)
+    elif isinstance(new_args, basestring):
+        new_args_kv = parse_kv(new_args)
+        final_args.update(new_args_kv)
+    return serialize_args(final_args)
+
+def parse_yaml(data, path_hint=None):
+    ''' convert a yaml string to a data structure.  Also supports JSON, ssssssh!!!'''
+
+    stripped_data = data.lstrip()
+    loaded = None
+    if stripped_data.startswith("{") or stripped_data.startswith("["):
+        # since the line starts with { or [ we can infer this is a JSON document.
+        try:
+            loaded = json.loads(data)
+        except ValueError, ve:
+            # include the file path in the error when the caller provided one
+            if path_hint:
+                raise errors.AnsibleError(path_hint + ": " + str(ve))
+            else:
+                raise errors.AnsibleError(str(ve))
+    else:
+        # else this is pretty sure to be a YAML document
+        loaded = yaml.load(data, Loader=Loader)
+
+    return loaded
+
+def process_common_errors(msg, probline, column):
+    '''Append a human-friendly hint to a YAML error message for the most
+    common mistakes (unquoted templates, stray colons, unbalanced quotes).
+
+    probline is the offending source line, column the parser error column.
+    Returns the (possibly extended) message.
+    '''
+    replaced = probline.replace(" ","")
+
+    # case 1: an unquoted '{{ ... }}' right after a colon
+    if ":{{" in replaced and "}}" in replaced:
+        msg = msg + """
+This one looks easy to fix.  YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{".  Most likely this was
+meant to be an ansible template evaluation instead, so we have to give the
+parser a small hint that we wanted a string instead. The solution here is to
+just quote the entire value.
+
+For instance, if the original line was:
+
+    app_path: {{ base_path }}/foo
+
+It should be written as:
+
+    app_path: "{{ base_path }}/foo"
+"""
+        return msg
+
+    # case 2: the error points at a second, unquoted colon on the line
+    elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
+        msg = msg + """
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is just add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+    copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+    copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+    copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+
+
+"""
+        return msg
+    else:
+        # case 3: quoting problems in the value after the first colon
+        parts = probline.split(":")
+        if len(parts) > 1:
+            middle = parts[1].strip()
+            match = False
+            unbalanced = False
+            if middle.startswith("'") and not middle.endswith("'"):
+                match = True
+            elif middle.startswith('"') and not middle.endswith('"'):
+                match = True
+            # NOTE(review): 'A and B or C' parses as '(A and B) or C', so a
+            # line with more than two double quotes is always flagged as
+            # unbalanced regardless of the other tests -- confirm intent.
+            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
+                unbalanced = True
+            if match:
+                msg = msg + """
+This one looks easy to fix.  It seems that there is a value started
+with a quote, and the YAML parser is expecting to see the line ended
+with the same kind of quote.  For instance:
+
+    when: "ok" in result.stdout
+
+Could be written as:
+
+   when: '"ok" in result.stdout'
+
+or equivalently:
+
+   when: "'ok' in result.stdout"
+
+"""
+                return msg
+
+            if unbalanced:
+                msg = msg + """
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes.  If starting a value with a quote, make sure the
+line ends with the same set of quotes.  For instance this arbitrary
+example:
+
+    foo: "bad" "wolf"
+
+Could be written as:
+
+    foo: '"bad" "wolf"'
+
+"""
+                return msg
+
+    return msg
+
+def process_yaml_error(exc, data, path=None, show_content=True):
+    '''Turn a yaml.YAMLError into an AnsibleYAMLValidationFailed with a
+    helpful, position-annotated message.
+
+    show_content=False suppresses the source lines (used for vault-encrypted
+    files so decrypted content is never echoed).  Always raises.
+    '''
+    if hasattr(exc, 'problem_mark'):
+        mark = exc.problem_mark
+        if show_content:
+            # include the offending line, the line before it, and a caret
+            if mark.line -1 >= 0:
+                before_probline = data.split("\n")[mark.line-1]
+            else:
+                before_probline = ''
+            probline = data.split("\n")[mark.line]
+            arrow = " " * mark.column + "^"
+            msg = """Syntax Error while loading YAML script, %s
+Note: The error may actually appear before this position: line %s, column %s
+
+%s
+%s
+%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
+
+            unquoted_var = None
+            if '{{' in probline and '}}' in probline:
+                if '"{{' not in probline or "'{{" not in probline:
+                    unquoted_var = True
+
+            if not unquoted_var:
+                msg = process_common_errors(msg, probline, mark.column)
+            else:
+                msg = msg + """
+We could be wrong, but this one looks like it might be an issue with
+missing quotes.  Always quote template expression brackets when they
+start a value. For instance:
+
+    with_items:
+      - {{ foo }}
+
+Should be written as:
+
+    with_items:
+      - "{{ foo }}"
+
+"""
+        else:
+            # most likely displaying a file with sensitive content,
+            # so don't show any of the actual lines of yaml just the
+            # line number itself
+            msg = """Syntax error while loading YAML script, %s
+The error appears to have been on line %s, column %s, but may actually
+be before there depending on the exact syntax problem.
+""" % (path, mark.line + 1, mark.column + 1)
+
+    else:
+        # No problem markers means we have to throw a generic
+        # "stuff messed up" type message. Sry bud.
+        if path:
+            msg = "Could not parse YAML. Check over %s again." % path
+        else:
+            msg = "Could not parse YAML."
+    raise errors.AnsibleYAMLValidationFailed(msg)
+
+
+def parse_yaml_from_file(path, vault_password=None):
+    ''' convert a yaml file to a data structure
+
+    Transparently decrypts vault-encrypted files when vault_password is given.
+    '''
+
+    data = None
+    show_content = True
+
+    try:
+        data = open(path).read()
+    except IOError:
+        raise errors.AnsibleError("file could not read: %s" % path)
+
+    vault = VaultLib(password=vault_password)
+    if vault.is_encrypted(data):
+        # if the file is encrypted and no password was specified,
+        # the decrypt call would throw an error, but we check first
+        # since the decrypt function doesn't know the file name
+        if vault_password is None:
+            raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
+        data = vault.decrypt(data)
+        # never echo decrypted content back in error messages
+        show_content = False
+
+    try:
+        return parse_yaml(data, path_hint=path)
+    except yaml.YAMLError, exc:
+        process_yaml_error(exc, data, path, show_content)
+
+def parse_kv(args):
+    ''' convert a string of key/value items to a dict '''
+    options = {}
+    if args is not None:
+        try:
+            vargs = split_args(args)
+        except ValueError, ve:
+            if 'no closing quotation' in str(ve).lower():
+                raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
+            else:
+                raise
+        # tokens without '=' are silently ignored
+        for x in vargs:
+            if "=" in x:
+                k, v = x.split("=",1)
+                options[k.strip()] = unquote(v.strip())
+    return options
+
+def _validate_both_dicts(a, b):
+    '''Raise AnsibleError unless both arguments are dicts.'''
+
+    if not (isinstance(a, dict) and isinstance(b, dict)):
+        raise errors.AnsibleError(
+            "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
+        )
+
+def merge_hash(a, b):
+    ''' recursively merges hash b into a
+    keys from b take precedence over keys from a '''
+
+    result = {}
+
+    # we check here as well as in combine_vars() since this
+    # function can work recursively with nested dicts
+    _validate_both_dicts(a, b)
+
+    # iterate a first, then b, so b's values overwrite a's on conflict
+    for dicts in a, b:
+        # next, iterate over b keys and values
+        for k, v in dicts.iteritems():
+            # if there's already such key in a
+            # and that key contains dict
+            if k in result and isinstance(result[k], dict):
+                # merge those dicts recursively
+                result[k] = merge_hash(a[k], v)
+            else:
+                # otherwise, just copy a value from b to a
+                result[k] = v
+
+    return result
+
+def default(value, function):
+    ''' syntactic sugar around lazy evaluation of defaults
+
+    Returns value if it is not None, otherwise calls function() for the default.
+    '''
+    if value is None:
+        return function()
+    return value
+
+
+def _git_repo_info(repo_path):
+    ''' returns a string containing git branch, commit id and commit date
+
+    Returns '' if repo_path does not look like a usable git checkout.
+    '''
+    result = None
+    if os.path.exists(repo_path):
+        # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
+        if os.path.isfile(repo_path):
+            try:
+                gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
+                # There is a possibility the .git file to have an absolute path.
+                if os.path.isabs(gitdir):
+                    repo_path = gitdir
+                else:
+                    # [:-4] strips the trailing '.git' component of the path
+                    repo_path = os.path.join(repo_path[:-4], gitdir)
+            except (IOError, AttributeError):
+                return ''
+        f = open(os.path.join(repo_path, "HEAD"))
+        branch = f.readline().split('/')[-1].rstrip("\n")
+        f.close()
+        branch_path = os.path.join(repo_path, "refs", "heads", branch)
+        if os.path.exists(branch_path):
+            f = open(branch_path)
+            commit = f.readline()[:10]
+            f.close()
+        else:
+            # detached HEAD
+            commit = branch[:10]
+            branch = 'detached HEAD'
+            branch_path = os.path.join(repo_path, "HEAD")
+
+        # use the branch file's mtime as the "last updated" timestamp
+        date = time.localtime(os.stat(branch_path).st_mtime)
+        if time.daylight == 0:
+            offset = time.timezone
+        else:
+            offset = time.altzone
+        result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
+            time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
+    else:
+        result = ''
+    return result
+
+
+def _gitinfo():
+    '''Return git info for the ansible checkout plus each submodule listed
+    in .gitmodules, or '' when not running from a git checkout.'''
+    # three levels up from lib/ansible/utils/ is the repository root
+    basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+    repo_path = os.path.join(basedir, '.git')
+    result = _git_repo_info(repo_path)
+    submodules = os.path.join(basedir, '.gitmodules')
+    if not os.path.exists(submodules):
+        return result
+    f = open(submodules)
+    for line in f:
+        # lines look like:  path = lib/ansible/modules/core
+        tokens = line.strip().split(' ')
+        if tokens[0] == 'path':
+            submodule_path = tokens[2]
+            submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
+            if not submodule_info:
+                submodule_info = ' not found - use git submodule update --init ' + submodule_path
+            result += "\n  {0}: {1}".format(submodule_path, submodule_info)
+    f.close()
+    return result
+
+
+def version(prog):
+    '''Return a version banner for prog including git info (when available)
+    and the configured module search path.'''
+    result = "{0} {1}".format(prog, __version__)
+    gitinfo = _gitinfo()
+    if gitinfo:
+        result = result + " {0}".format(gitinfo)
+    result = result + "\n  configured module search path = %s" % C.DEFAULT_MODULE_PATH
+    return result
+
+def version_info(gitinfo=False):
+    '''Return the ansible version as a dict with string/full/major/minor/revision.
+
+    gitinfo=True includes git checkout details in 'string' (expensive).
+    '''
+    if gitinfo:
+        # expensive call, user with care
+        ansible_version_string = version('')
+    else:
+        ansible_version_string = __version__
+    ansible_version = ansible_version_string.split()[0]
+    ansible_versions = ansible_version.split('.')
+    # coerce each component to int where possible; leave non-numeric parts as-is
+    for counter in range(len(ansible_versions)):
+        if ansible_versions[counter] == "":
+            ansible_versions[counter] = 0
+        try:
+            ansible_versions[counter] = int(ansible_versions[counter])
+        except:
+            pass
+    # pad to at least major.minor.revision
+    if len(ansible_versions) < 3:
+        for counter in range(len(ansible_versions), 3):
+            ansible_versions.append(0)
+    return {'string':      ansible_version_string.strip(),
+            'full':        ansible_version,
+            'major':       ansible_versions[0],
+            'minor':       ansible_versions[1],
+            'revision':    ansible_versions[2]}
+
+def getch():
+    ''' read in a single character '''
+    fd = sys.stdin.fileno()
+    # switch the terminal to raw mode so we get the key without a newline,
+    # restoring the previous settings no matter what
+    old_settings = termios.tcgetattr(fd)
+    try:
+        tty.setraw(sys.stdin.fileno())
+        ch = sys.stdin.read(1)
+    finally:
+        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+    return ch
+
+def sanitize_output(arg_string):
+    ''' strips private info out of a string
+
+    Values of known password keys are replaced with VALUE_HIDDEN; everything
+    else is passed through heuristic_log_sanitize().
+    '''
+
+    private_keys = ('password', 'login_password')
+
+    output = []
+    for part in arg_string.split():
+        try:
+            (k, v) = part.split('=', 1)
+        except ValueError:
+            # not a k=v token; sanitize it as a whole
+            v = heuristic_log_sanitize(part)
+            output.append(v)
+            continue
+
+        if k in private_keys:
+            v = 'VALUE_HIDDEN'
+        else:
+            v = heuristic_log_sanitize(v)
+        output.append('%s=%s' % (k, v))
+
+    output = ' '.join(output)
+    return output
+
+
+####################################################################
+# option handling code for /usr/bin/ansible and ansible-playbook
+# below this line
+
+class SortedOptParser(optparse.OptionParser):
+    '''Optparser which sorts the options by opt before outputting --help'''
+
+    def format_help(self, formatter=None):
+        # sort in place by option string (e.g. '--forks') for readable --help
+        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
+        return optparse.OptionParser.format_help(self, formatter=None)
+
+def increment_debug(option, opt, value, parser):
+    '''optparse callback: each -v flag bumps the module-level VERBOSITY.'''
+    global VERBOSITY
+    VERBOSITY += 1
+
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
        async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
    '''
    create an options parser for any ansible script

    Builds a SortedOptParser pre-populated with the options every ansible
    CLI tool shares (verbosity, forks, inventory, extra-vars, remote user,
    SSH/vault password prompts, module path).  The boolean keyword flags
    switch on optional option groups so each tool exposes only what it
    supports:

      output_opts  -- -o/--one-line and -t/--tree result formatting
      runas_opts   -- deprecated sudo/su flags plus the consolidated
                      "become" privilege escalation options
      async_opts   -- -B/--background and -P/--poll
      connect_opts -- -c/--connection transport selection
      subset_opts  -- -l/--limit host pattern restriction
      check_opts   -- -C/--check dry-run mode
      diff_opts    -- -D/--diff file diff output

    constants is the ansible.constants module (injectable so defaults can
    be overridden in tests).  Returns the configured parser; the caller
    is responsible for running parser.parse_args().
    '''

    parser = SortedOptParser(usage, version=version("%prog"))
    # -v is a counting callback: each repetition raises the debug level
    parser.add_option('-v','--verbose', default=False, action="callback",
        callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")

    # options common to all tools
    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
        help="set additional variables as key=value or YAML/JSON", default=[])
    parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
        help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')
    parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
        help='use this file to authenticate the connection')
    parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
        help='ask for vault password')
    parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
        dest='vault_password_file', help="vault password file")
    parser.add_option('--list-hosts', dest='listhosts', action='store_true',
        help='outputs a list of matching hosts; does not execute anything else')
    parser.add_option('-M', '--module-path', dest='module_path',
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=None)

    if subset_opts:
        parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
            help='further limit selected hosts to an additional pattern')

    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)

    if output_opts:
        parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
            help='condense output')
        parser.add_option('-t', '--tree', dest='tree', default=None,
            help='log output to this directory')

    if runas_opts:
        # priv user defaults to root later on to enable detecting when this option was given here
        parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
            help='ask for sudo password (deprecated, use become)')
        parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
            help='ask for su password (deprecated, use become)')
        parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
            help="run operations with sudo (nopasswd) (deprecated, use become)")
        parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
            help='desired sudo user (default=root) (deprecated, use become)')
        parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
            help='run operations with su (deprecated, use become)')
        parser.add_option('-R', '--su-user', default=None,
            help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)

        # consolidated privilege escalation (become)
        parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
            help="run operations with become (nopasswd implied)")
        parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
            help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
        parser.add_option('--become-user', default=None, dest='become_user', type='string',
            help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
        parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
            help='ask for privilege escalation password')


    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            default=constants.DEFAULT_TRANSPORT,
            help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)

    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')

    if check_opts:
        parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
            help="don't make any changes; instead, try to predict some of the changes that may occur"
        )

    if diff_opts:
        parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
            help="when changing (small) files and templates, show the differences in those files; works great with --check"
        )

    return parser
+
def parse_extra_vars(extra_vars_opts, vault_pass):
    '''
    Merge every -e/--extra-vars argument into a single dict.

    Each entry of extra_vars_opts is interpreted as one of:
      @file        -- load vars from a (possibly vault-encrypted) YAML file
      '[...]'/'{...}' -- parse inline as YAML (JSON is a subset of YAML)
      key=value    -- parse as key=value pairs

    Later entries win over earlier ones per combine_vars() semantics;
    vault_pass is only consulted when reading @files.
    '''
    extra_vars = {}
    for extra_vars_opt in extra_vars_opts:
        extra_vars_opt = to_unicode(extra_vars_opt)
        if extra_vars_opt.startswith(u"@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
        elif extra_vars_opt and extra_vars_opt[0] in u'[{':
            # Arguments as YAML
            extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
        else:
            # Arguments as Key-value
            extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
    return extra_vars
+
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
    '''
    Interactively prompt (via getpass, so nothing is echoed) for the
    current and/or new vault password.

    ask_vault_pass / ask_new_vault_pass select which prompts are shown;
    confirm_vault / confirm_new add a re-type confirmation that raises
    AnsibleError on mismatch.  Returns the tuple
    (vault_pass, new_vault_pass) as stripped byte strings, each element
    None when its prompt was not requested.
    '''

    vault_pass = None
    new_vault_pass = None

    if ask_vault_pass:
        vault_pass = getpass.getpass(prompt="Vault password: ")

    if ask_vault_pass and confirm_vault:
        vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
        if vault_pass != vault_pass2:
            raise errors.AnsibleError("Passwords do not match")

    if ask_new_vault_pass:
        new_vault_pass = getpass.getpass(prompt="New Vault password: ")

    if ask_new_vault_pass and confirm_new:
        new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
        if new_vault_pass != new_vault_pass2:
            raise errors.AnsibleError("Passwords do not match")

    # enforce no newline chars at the end of passwords
    if vault_pass:
        vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
    if new_vault_pass:
        new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()

    return vault_pass, new_vault_pass
+
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
    '''
    Interactively prompt for the SSH, privilege escalation and vault
    passwords, as requested by the three ask_* flags.

    When both the SSH and the become password are asked for, an empty
    become password falls back to the SSH password (and the become
    prompt says so).  Returns (sshpass, becomepass, vaultpass) as byte
    strings; each element is None when not requested.
    '''
    sshpass = None
    becomepass = None
    vaultpass = None
    become_prompt = ''

    if ask_pass:
        sshpass = getpass.getpass(prompt="SSH password: ")
        become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
        if sshpass:
            sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
    else:
        become_prompt = "%s password: " % become_method.upper()

    if become_ask_pass:
        becomepass = getpass.getpass(prompt=become_prompt)
        if ask_pass and becomepass == '':
            becomepass = sshpass
        # NOTE(review): unlike the other conversions in this function this
        # call omits errors='strict'/nonstring='simplerepr' -- looks like
        # an oversight, confirm before changing
        if becomepass:
            becomepass = to_bytes(becomepass)

    if ask_vault_pass:
        vaultpass = getpass.getpass(prompt="Vault password: ")
        if vaultpass:
            # strip to avoid trailing newline chars in the vault password
            vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()

    return (sshpass, becomepass, vaultpass)
+
+
def choose_pass_prompt(options):
    '''
    Decide which escalation mechanism the password prompt should name:
    the deprecated --ask-su-pass / --ask-sudo-pass flags take priority
    (in that order), otherwise the configured become method is used.
    '''
    for legacy_flag, legacy_method in (('ask_su_pass', 'su'), ('ask_sudo_pass', 'sudo')):
        if getattr(options, legacy_flag):
            return legacy_method

    return options.become_method
+
def normalize_become_options(options):
    '''
    Fold the deprecated sudo/su command line options into the unified
    "become" options.  Mutates options in place; returns nothing.

    become_ask_pass and become_user fall back through: the explicit
    become value, then the legacy sudo/su value, then the configured
    default.  If --become was not given but --sudo or --su was, become
    is switched on with the matching method; an explicit --become keeps
    whatever method was configured.
    '''

    options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
    options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER

    if options.become:
        # explicit --become wins; keep the configured become_method
        pass
    elif options.sudo:
        options.become = True
        options.become_method = 'sudo'
    elif options.su:
        options.become = True
        options.become_method = 'su'
+
+
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    '''
    Hash result with the passlib algorithm named by encrypt
    (e.g. 'sha512_crypt'); used for vars_prompt "encrypt:" values.

    salt_size takes precedence over salt when both are given; with
    neither, passlib chooses its own salt.  Returns the hashed string.
    Raises AnsibleError when the algorithm name is unknown or passlib
    is not installed.
    '''
    if PASSLIB_AVAILABLE:
        try:
            # look the algorithm up by name on the passlib.hash namespace
            crypt = getattr(passlib.hash, encrypt)
        except:
            raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)

        if salt_size:
            result = crypt.encrypt(result, salt_size=salt_size)
        elif salt:
            result = crypt.encrypt(result, salt=salt)
        else:
            result = crypt.encrypt(result)
    else:
        raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")

    return result
+
def last_non_blank_line(buf):
    '''
    Return the last line of buf that contains any characters at all;
    returns "" when there is no such line (i.e. no output).
    '''
    for candidate in reversed(buf.splitlines()):
        if candidate:
            return candidate
    # shouldn't occur unless there's no output
    return ""
+
def filter_leading_non_json_lines(buf):
    '''
    used to avoid random output from SSH at the top of JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

    Drops every line before the first one that starts with '{' or '[';
    everything from that line on is kept (multiline JSON is valid).
    Each kept line is newline-terminated in the returned string.
    '''
    # Build the result with a plain list + join instead of the Python2-only
    # StringIO module: same output, portable, and simpler.
    kept_lines = []
    stop_filtering = False
    for line in buf.splitlines():
        if stop_filtering or line.startswith('{') or line.startswith('['):
            stop_filtering = True
            kept_lines.append(line + '\n')
    return ''.join(kept_lines)
+
def boolean(value):
    '''
    Loosely interpret value as a boolean: its string form, lowercased,
    must be one of the usual truthy spellings; anything else is False.
    '''
    return str(value).lower() in ("true", "t", "y", "1", "yes")
+
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
    """
    helper function for connection plugins to create privilege escalation commands

    Wraps cmd so it runs as `user` via `method` (sudo/su/pbrun/pfexec),
    echoing a per-call random success marker before cmd so the caller can
    detect that escalation worked.  Returns the tuple
    (wrapped_command, password_prompt, success_key); password_prompt is
    None for methods where we never read a prompt.  Raises AnsibleError
    for an unknown method.
    """

    # random suffix makes the success marker (and the sudo prompt) unique per call
    randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
    success_key = 'BECOME-SUCCESS-%s' % randbits
    prompt = None
    becomecmd = None

    shell = shell or '$SHELL'

    if method == 'sudo':
        # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
        # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
        # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
        # string to the user's shell. We loop reading output until we see the randomly-generated
        # sudo prompt set with the -p option.
        prompt = '[sudo via ansible, key=%s] password: ' % randbits
        exe = exe or C.DEFAULT_SUDO_EXE
        becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
            (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))

    elif method == 'su':
        exe = exe or C.DEFAULT_SU_EXE
        flags = flags or C.DEFAULT_SU_FLAGS
        becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))

    elif method == 'pbrun':
        prompt = 'assword:'
        exe = exe or 'pbrun'
        flags = flags or ''
        becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))

    elif method == 'pfexec':
        exe = exe or 'pfexec'
        flags = flags or ''
        # No user as it uses it's own exec_attr to figure it out
        becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))

    if becomecmd is None:
        raise errors.AnsibleError("Privilege escalation method not found: %s" % method)

    # the escalated command is itself run through the user's shell
    return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
+
+
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
    """
    helper function for connection plugins to create sudo commands

    Backwards-compatible thin wrapper over make_become_cmd() with the
    default sudo flags; returns the same (cmd, prompt, success_key) tuple.
    """
    return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
+
+
def make_su_cmd(su_user, executable, cmd):
    """
    Helper function for connection plugins to create direct su commands

    Backwards-compatible thin wrapper over make_become_cmd() with the
    default su flags and executable; returns the same
    (cmd, prompt, success_key) tuple.
    """
    return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
+
def get_diff(diff):
    # called by --diff usage in playbook and runner via callbacks
    # include names in diffs 'before' and 'after' and do diff -U 10
    #
    # diff is a dict; recognized keys: 'before'/'after' (the two contents),
    # optional 'before_header'/'after_header' (labels for the diff), and
    # the dst_binary/src_binary/dst_larger/src_larger marker keys which
    # each contribute a "diff skipped" note instead of a real diff.
    # Returns a unicode string, or a short message when the contents
    # cannot be decoded for comparison.

    try:
        with warnings.catch_warnings():
            # silence warnings raised while diffing odd inputs
            warnings.simplefilter('ignore')
            ret = []
            if 'dst_binary' in diff:
                ret.append("diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append("diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                if 'before_header' in diff:
                    before_header = "before: %s" % diff['before_header']
                else:
                    before_header = 'before'
                if 'after_header' in diff:
                    after_header = "after: %s" % diff['after_header']
                else:
                    after_header = 'after'
                # unified diff with 10 lines of context, as the header comment says
                differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
                for line in list(differ):
                    ret.append(line)
            return u"".join(ret)
    except UnicodeDecodeError:
        return ">> the files are different, but the diff library cannot compare unicode strings"
+
def is_list_of_strings(items):
    '''
    True when every element of items is a (byte or unicode) string;
    True for an empty iterable.
    '''
    return all(isinstance(item, basestring) for item in items)
+
def list_union(a, b):
    '''
    Ordered union of two lists: elements of a followed by elements of b,
    keeping only the first occurrence of each.  Membership is tested
    with list containment (not a set), so unhashable elements work.
    '''
    merged = []
    for item in list(a) + list(b):
        if item not in merged:
            merged.append(item)
    return merged
+
def list_intersection(a, b):
    '''
    Ordered intersection: elements of a that also appear in b, in a's
    order, without duplicates.
    '''
    common = []
    for item in a:
        if item not in b:
            continue
        if item in common:
            continue
        common.append(item)
    return common
+
def list_difference(a, b):
    '''
    Ordered symmetric difference: items appearing only in a (in a's
    order) followed by items appearing only in b, duplicates dropped.
    '''
    exclusive = []
    for source, other in ((a, b), (b, a)):
        for item in source:
            if item not in other and item not in exclusive:
                exclusive.append(item)
    return exclusive
+
def contains_vars(data):
    '''
    returns True if the data contains a variable pattern: either a '$'
    legacy marker or a '{{' jinja2 opener
    '''
    return any(marker in data for marker in ("$", "{{"))
+
+def safe_eval(expr, locals={}, include_exceptions=False):
+ '''
+ This is intended for allowing things like:
+ with_items: a_list_variable
+
+ Where Jinja2 would return a string but we do not want to allow it to
+ call functions (outside of Jinja2, where the env is constrained). If
+ the input data to this function came from an untrusted (remote) source,
+ it should first be run through _clean_data_struct() to ensure the data
+ is further sanitized prior to evaluation.
+
+ Based on:
+ http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
+ '''
+
+ # this is the whitelist of AST nodes we are going to
+ # allow in the evaluation. Any node type other than
+ # those listed here will raise an exception in our custom
+ # visitor class defined below.
+ SAFE_NODES = set(
+ (
+ ast.Add,
+ ast.BinOp,
+ ast.Call,
+ ast.Compare,
+ ast.Dict,
+ ast.Div,
+ ast.Expression,
+ ast.List,
+ ast.Load,
+ ast.Mult,
+ ast.Num,
+ ast.Name,
+ ast.Str,
+ ast.Sub,
+ ast.Tuple,
+ ast.UnaryOp,
+ )
+ )
+
+ # AST node types were expanded after 2.6
+ if not sys.version.startswith('2.6'):
+ SAFE_NODES.union(
+ set(
+ (ast.Set,)
+ )
+ )
+
+ filter_list = []
+ for filter in filter_loader.all():
+ filter_list.extend(filter.filters().keys())
+
+ CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
+
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False):
+ if type(node) not in SAFE_NODES:
+ raise Exception("invalid expression (%s)" % expr)
+ elif isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Name) and inside_call:
+ if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
+ raise Exception("invalid function: %s" % node.id)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(child_node, inside_call)
+
+ if not isinstance(expr, basestring):
+ # already templated to a datastructure, perhaps?
+ if include_exceptions:
+ return (expr, None)
+ return expr
+
+ cnv = CleansingNodeVisitor()
+ try:
+ parsed_tree = ast.parse(expr, mode='eval')
+ cnv.visit(parsed_tree)
+ compiled = compile(parsed_tree, expr, 'eval')
+ result = eval(compiled, {}, locals)
+
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except SyntaxError, e:
+ # special handling for syntax errors, we just return
+ # the expression string back as-is
+ if include_exceptions:
+ return (expr, None)
+ return expr
+ except Exception, e:
+ if include_exceptions:
+ return (expr, e)
+ return expr
+
+
def listify_lookup_plugin_terms(terms, basedir, inject):
    '''
    Normalize the argument of a with_* lookup into a list.

    A bare string may itself be a variable name or inline Jinja2: it is
    templated with inject as context and, if the result looks like a
    literal list/dict, safe_eval()'d back into a real datastructure.
    Anything still a string after that is wrapped in a one-element list.
    '''

    from ansible.utils import template

    if isinstance(terms, basestring):
        # someone did:
        # with_items: alist
        # OR
        # with_items: {{ alist }}

        stripped = terms.strip()
        if not (stripped.startswith('{') or stripped.startswith('[')) and \
            not stripped.startswith("/") and \
            not stripped.startswith('set([') and \
            not LOOKUP_REGEX.search(terms):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
                if isinstance(new_terms, basestring) and "{{" in new_terms:
                    # templating did not resolve; keep the original string
                    pass
                else:
                    terms = new_terms
            except:
                # templating failed; fall through with terms unchanged
                pass

        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            # TODO: something a bit less heavy than eval
            return safe_eval(terms)

    if isinstance(terms, basestring):
        terms = [ terms ]

    return terms
+
def combine_vars(a, b):
    '''
    Combine two variable dicts into a new dict.  Both arguments must be
    dicts (validated via _validate_both_dicts).  With
    hash_behaviour=merge the two are merged via merge_hash(); otherwise
    (the default) a shallow combination is returned in which b's values
    win on key collisions.
    '''

    _validate_both_dicts(a, b)

    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    else:
        # b's items come second, so they override a's on duplicate keys
        return dict(a.items() + b.items())
+
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
    '''Return a random password string of length containing only chars.'''

    # Rejection sampling over os.urandom: draw one random byte at a time
    # and keep it only when it is in the allowed set, so every allowed
    # character is equally likely (no modulo bias).
    # NOTE(review): relies on Python 2 semantics -- os.urandom(1) is a
    # str there; on Python 3 it would be bytes and never match a str
    # chars, looping forever.  Confirm before any py3 port.
    password = []
    while len(password) < length:
        new_char = os.urandom(1)
        if new_char in chars:
            password.append(new_char)

    return ''.join(password)
+
def before_comment(msg):
    ''' what's the part of a string before a comment? '''
    # Escaped hashes (\#) are not comment starters, so park them in a
    # sentinel, cut at the first real '#', then restore them.
    sentinel = "**NOT_A_COMMENT**"
    guarded = msg.replace("\\#", sentinel)
    head = guarded.split("#")[0]
    return head.replace(sentinel, "#")
+
def load_vars(basepath, results, vault_password=None):
    """
    Load variables from any potential yaml filename combinations of basepath,
    returning result.

    basepath is tried with every extension in C.YAML_FILENAME_EXTENSIONS;
    each existing match is merged into results via _load_vars_from_path.
    Finding more than one match (e.g. both all.yml and all.yaml) is
    ambiguous and raises AnsibleError.  vault_password is passed through
    for decrypting vault-encrypted files.
    """

    paths_to_check = [ "".join([basepath, ext])
                       for ext in C.YAML_FILENAME_EXTENSIONS ]

    found_paths = []

    for path in paths_to_check:
        found, results = _load_vars_from_path(path, results, vault_password=vault_password)
        if found:
            found_paths.append(path)


    # disallow the potentially confusing situation that there are multiple
    # variable files for the same name. For example if both group_vars/all.yml
    # and group_vars/all.yaml
    if len(found_paths) > 1:
        raise errors.AnsibleError("Multiple variable files found. "
            "There should only be one. %s" % ( found_paths, ))

    return results
+
+## load variables from yaml files/dirs
+# e.g. host/group_vars
+#
def _load_vars_from_path(path, results, vault_password=None):
    """
    Robustly access the file at path and load variables, carefully reporting
    errors in a friendly/informative way.

    Handles symlinks (followed recursively so permission errors surface
    with a useful message), directories (delegated to
    _load_vars_from_folder) and regular YAML files (merged into results
    via combine_vars).  A missing path is not an error.

    Return the tuple (found, new_results, )
    """

    try:
        # in the case of a symbolic link, we want the stat of the link itself,
        # not its target
        pathstat = os.lstat(path)
    except os.error, err:
        # most common case is that nothing exists at that path.
        if err.errno == errno.ENOENT:
            return False, results
        # otherwise this is a condition we should report to the user
        raise errors.AnsibleError(
            "%s is not accessible: %s."
            " Please check its permissions." % ( path, err.strerror))

    # symbolic link
    if stat.S_ISLNK(pathstat.st_mode):
        try:
            target = os.path.realpath(path)
        except os.error, err2:
            raise errors.AnsibleError("The symbolic link at %s "
                "is not readable: %s. Please check its permissions."
                % (path, err2.strerror, ))
        # follow symbolic link chains by recursing, so we repeat the same
        # permissions checks above and provide useful errors.
        return _load_vars_from_path(target, results, vault_password)

    # directory
    if stat.S_ISDIR(pathstat.st_mode):

        # support organizing variables across multiple files in a directory
        return True, _load_vars_from_folder(path, results, vault_password=vault_password)

    # regular file
    elif stat.S_ISREG(pathstat.st_mode):
        data = parse_yaml_from_file(path, vault_password=vault_password)
        # a vars file must parse to a dict (or be empty)
        if data and type(data) != dict:
            raise errors.AnsibleError(
                "%s must be stored as a dictionary/hash" % path)
        elif data is None:
            data = {}

        # combine vars overrides by default but can be configured to do a
        # hash merge in settings
        results = combine_vars(results, data)
        return True, results

    # something else? could be a fifo, socket, device, etc.
    else:
        raise errors.AnsibleError("Expected a variable file or directory "
            "but found a non-file object at path %s" % (path, ))
+
def _load_vars_from_folder(folder_path, results, vault_password=None):
    """
    Load all variables within a folder recursively.

    Files are processed in sorted name order so later files override
    earlier ones deterministically; hidden entries (leading '.') are
    skipped.  Returns the accumulated results dict.
    """

    # this function and _load_vars_from_path are mutually recursive

    try:
        names = os.listdir(folder_path)
    except os.error, err:
        raise errors.AnsibleError(
            "This folder cannot be listed: %s: %s."
            % ( folder_path, err.strerror))

    # evaluate files in a stable order rather than whatever order the
    # filesystem lists them.
    names.sort()

    # do not parse hidden files or dirs, e.g. .svn/
    paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
    for path in paths:
        _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
    return results
+
def update_hash(hash, key, new_value):
    ''' used to avoid nested .update calls on the parent '''
    # Merge new_value into the dict stored under key, creating the bucket
    # on first use; an existing bucket dict is updated in place.
    bucket = hash.setdefault(key, {})
    bucket.update(new_value)
+
def censor_unlogged_data(data):
    '''
    used when the no_log: True attribute is passed to a task to keep data from a callback.
    NOT intended to prevent variable registration, but only things from showing up on
    screen

    Returns a new dict keeping only the harmless status keys plus a
    'censored' marker; data itself is not modified.
    '''
    # .items() instead of the Python2-only .iteritems(): identical result,
    # and the dicts involved are small so materializing is harmless.
    allowed_keys = ('skipped', 'changed', 'failed', 'rc')
    new_data = dict((k, v) for (k, v) in data.items() if k in allowed_keys)
    new_data['censored'] = 'results hidden due to no_log parameter'
    return new_data
+
def check_mutually_exclusive_privilege(options, parser):
    '''
    Abort via parser.error() when more than one privilege escalation
    family (su, sudo, become) is requested on the command line: the
    three groups of flags are mutually exclusive.
    '''
    su_requested = options.su or options.su_user or options.ask_su_pass
    sudo_requested = options.sudo or options.sudo_user or options.ask_sudo_pass
    become_requested = options.become or options.become_user or options.become_ask_pass

    requested = [flag for flag in (su_requested, sudo_requested, become_requested) if flag]
    if len(requested) > 1:
        # NOTE(review): "'-su'" in this message looks like a typo for
        # '--su', preserved as-is
        parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
                "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
                " are exclusive of each other")
+
+
diff --git a/lib/ansible/utils/cmd_functions.py b/v1/ansible/utils/cmd_functions.py
similarity index 100%
rename from lib/ansible/utils/cmd_functions.py
rename to v1/ansible/utils/cmd_functions.py
diff --git a/lib/ansible/utils/display_functions.py b/v1/ansible/utils/display_functions.py
similarity index 100%
rename from lib/ansible/utils/display_functions.py
rename to v1/ansible/utils/display_functions.py
diff --git a/v2/ansible/utils/hashing.py b/v1/ansible/utils/hashing.py
similarity index 92%
rename from v2/ansible/utils/hashing.py
rename to v1/ansible/utils/hashing.py
index 5e378db79f4..a7d142e5bd4 100644
--- a/v2/ansible/utils/hashing.py
+++ b/v1/ansible/utils/hashing.py
@@ -20,7 +20,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
@@ -44,8 +43,6 @@ def secure_hash_s(data, hash_func=sha1):
digest = hash_func()
try:
- if not isinstance(data, basestring):
- data = "%s" % data
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
@@ -65,8 +62,8 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError as e:
- raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ except IOError, e:
+ raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
diff --git a/v2/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py
similarity index 96%
rename from v2/ansible/utils/module_docs.py
rename to v1/ansible/utils/module_docs.py
index 632b4a00c2a..ee99af2cb54 100644
--- a/v2/ansible/utils/module_docs.py
+++ b/v1/ansible/utils/module_docs.py
@@ -23,7 +23,7 @@ import ast
import yaml
import traceback
-from ansible.plugins import fragment_loader
+from ansible import utils
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
@@ -66,7 +66,7 @@ def get_docstring(filename, verbose=False):
if fragment_slug != 'doesnotexist':
- fragment_class = fragment_loader.get(fragment_name)
+ fragment_class = utils.plugins.fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
diff --git a/v1/ansible/utils/module_docs_fragments/__init__.py b/v1/ansible/utils/module_docs_fragments/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/v1/ansible/utils/module_docs_fragments/aws.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/aws.py
rename to v1/ansible/utils/module_docs_fragments/aws.py
diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/v1/ansible/utils/module_docs_fragments/cloudstack.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/cloudstack.py
rename to v1/ansible/utils/module_docs_fragments/cloudstack.py
diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/v1/ansible/utils/module_docs_fragments/files.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/files.py
rename to v1/ansible/utils/module_docs_fragments/files.py
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/v1/ansible/utils/module_docs_fragments/openstack.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/openstack.py
rename to v1/ansible/utils/module_docs_fragments/openstack.py
diff --git a/lib/ansible/utils/module_docs_fragments/rackspace.py b/v1/ansible/utils/module_docs_fragments/rackspace.py
similarity index 100%
rename from lib/ansible/utils/module_docs_fragments/rackspace.py
rename to v1/ansible/utils/module_docs_fragments/rackspace.py
diff --git a/lib/ansible/utils/plugins.py b/v1/ansible/utils/plugins.py
similarity index 100%
rename from lib/ansible/utils/plugins.py
rename to v1/ansible/utils/plugins.py
diff --git a/lib/ansible/utils/string_functions.py b/v1/ansible/utils/string_functions.py
similarity index 100%
rename from lib/ansible/utils/string_functions.py
rename to v1/ansible/utils/string_functions.py
diff --git a/lib/ansible/utils/su_prompts.py b/v1/ansible/utils/su_prompts.py
similarity index 100%
rename from lib/ansible/utils/su_prompts.py
rename to v1/ansible/utils/su_prompts.py
diff --git a/lib/ansible/utils/template.py b/v1/ansible/utils/template.py
similarity index 100%
rename from lib/ansible/utils/template.py
rename to v1/ansible/utils/template.py
diff --git a/v2/ansible/utils/unicode.py b/v1/ansible/utils/unicode.py
similarity index 93%
rename from v2/ansible/utils/unicode.py
rename to v1/ansible/utils/unicode.py
index 2cff2e5e45c..7bd035c0075 100644
--- a/v2/ansible/utils/unicode.py
+++ b/v1/ansible/utils/unicode.py
@@ -19,8 +19,6 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from six import string_types, text_type, binary_type, PY3
-
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
@@ -37,9 +35,6 @@ _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
-if PY3:
- basestring = (str, bytes)
-
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
@@ -94,12 +89,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
- if isinstance(obj, text_type):
+ if isinstance(obj, unicode):
return obj
if encoding in _UTF8_ALIASES:
- return text_type(obj, 'utf-8', errors)
+ return unicode(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
- return text_type(obj, 'latin-1', errors)
+ return unicode(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
@@ -115,19 +110,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
simple = None
if not simple:
try:
- simple = text_type(obj)
+ simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
- if isinstance(simple, binary_type):
- return text_type(simple, encoding, errors)
+ if isinstance(simple, str):
+ return unicode(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
- if isinstance(obj_repr, binary_type):
- obj_repr = text_type(obj_repr, encoding, errors)
+ if isinstance(obj_repr, str):
+ obj_repr = unicode(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
@@ -203,19 +198,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
- if isinstance(obj, binary_type):
+ if isinstance(obj, str):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
- return b''
+ return ''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
- simple = binary_type(obj)
+ simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
@@ -225,19 +220,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
- simple = b''
- if isinstance(simple, text_type):
+ simple = ''
+ if isinstance(simple, unicode):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
- obj_repr = b''
- if isinstance(obj_repr, text_type):
+ obj_repr = ''
+ if isinstance(obj_repr, unicode):
obj_repr = obj_repr.encode(encoding, errors)
else:
- obj_repr = binary_type(obj_repr)
+ obj_repr = str(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
diff --git a/v1/ansible/utils/vault.py b/v1/ansible/utils/vault.py
new file mode 100644
index 00000000000..842688a2c18
--- /dev/null
+++ b/v1/ansible/utils/vault.py
@@ -0,0 +1,585 @@
+# (c) 2014, James Tanner
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+# ansible-vault is a utility module for encrypting and decrypting
+# sensitive data files. It provides the VaultLib wrapper, the
+# VaultEditor helper for create/edit/view/rekey operations, and the
+# VaultAES (legacy) and VaultAES256 cipher implementations.
+
+import os
+import shlex
+import shutil
+import tempfile
+from io import BytesIO
+from subprocess import call
+from ansible import errors
+from hashlib import sha256
+
+# Note: Only used for loading obsolete VaultAES files. All files are written
+# using the newer VaultAES256 which does not require md5
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ # MD5 unavailable. Possibly FIPS mode
+ md5 = None
+
+from binascii import hexlify
+from binascii import unhexlify
+from ansible import constants as C
+
+try:
+ from Crypto.Hash import SHA256, HMAC
+ HAS_HASH = True
+except ImportError:
+ HAS_HASH = False
+
+# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
+try:
+ from Crypto.Util import Counter
+ HAS_COUNTER = True
+except ImportError:
+ HAS_COUNTER = False
+
+# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
+try:
+ from Crypto.Protocol.KDF import PBKDF2
+ HAS_PBKDF2 = True
+except ImportError:
+ HAS_PBKDF2 = False
+
+# AES IMPORTS
+try:
+ from Crypto.Cipher import AES as AES
+ HAS_AES = True
+except ImportError:
+ HAS_AES = False
+
+CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
+
+HEADER='$ANSIBLE_VAULT'
+CIPHER_WHITELIST=['AES', 'AES256']
+
+class VaultLib(object):
+
+ def __init__(self, password):
+ self.password = password
+ self.cipher_name = None
+ self.version = '1.1'
+
+ def is_encrypted(self, data):
+ if data.startswith(HEADER):
+ return True
+ else:
+ return False
+
+ def encrypt(self, data):
+
+ if self.is_encrypted(data):
+ raise errors.AnsibleError("data is already encrypted")
+
+ if not self.cipher_name:
+ self.cipher_name = "AES256"
+ #raise errors.AnsibleError("the cipher must be set before encrypting data")
+
+ if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
+ cipher = globals()['Vault' + self.cipher_name]
+ this_cipher = cipher()
+ else:
+ raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
+
+ """
+ # combine sha + data
+ this_sha = sha256(data).hexdigest()
+ tmp_data = this_sha + "\n" + data
+ """
+
+ # encrypt sha + data
+ enc_data = this_cipher.encrypt(data, self.password)
+
+ # add header
+ tmp_data = self._add_header(enc_data)
+ return tmp_data
+
+ def decrypt(self, data):
+ if self.password is None:
+ raise errors.AnsibleError("A vault password must be specified to decrypt data")
+
+ if not self.is_encrypted(data):
+ raise errors.AnsibleError("data is not encrypted")
+
+ # clean out header
+ data = self._split_header(data)
+
+ # create the cipher object
+ if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
+ cipher = globals()['Vault' + self.cipher_name]
+ this_cipher = cipher()
+ else:
+ raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
+
+        # try to decrypt the data
+ data = this_cipher.decrypt(data, self.password)
+ if data is None:
+ raise errors.AnsibleError("Decryption failed")
+
+ return data
+
+ def _add_header(self, data):
+ # combine header and encrypted data in 80 char columns
+
+ #tmpdata = hexlify(data)
+ tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]
+
+ if not self.cipher_name:
+ raise errors.AnsibleError("the cipher must be set before adding a header")
+
+ dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"
+
+ for l in tmpdata:
+ dirty_data += l + '\n'
+
+ return dirty_data
+
+
+ def _split_header(self, data):
+ # used by decrypt
+
+ tmpdata = data.split('\n')
+ tmpheader = tmpdata[0].strip().split(';')
+
+ self.version = str(tmpheader[1].strip())
+ self.cipher_name = str(tmpheader[2].strip())
+ clean_data = '\n'.join(tmpdata[1:])
+
+ """
+ # strip out newline, join, unhex
+ clean_data = [ x.strip() for x in clean_data ]
+ clean_data = unhexlify(''.join(clean_data))
+ """
+
+ return clean_data
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *err):
+ pass
+
+class VaultEditor(object):
+ # uses helper methods for write_file(self, filename, data)
+ # to write a file so that code isn't duplicated for simple
+ # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
+ # ... "Don't Repeat Yourself", etc.
+
+ def __init__(self, cipher_name, password, filename):
+ # instantiates a member variable for VaultLib
+ self.cipher_name = cipher_name
+ self.password = password
+ self.filename = filename
+
+ def _edit_file_helper(self, existing_data=None, cipher=None):
+ # make sure the umask is set to a sane value
+ old_umask = os.umask(0o077)
+
+ # Create a tempfile
+ _, tmp_path = tempfile.mkstemp()
+
+ if existing_data:
+ self.write_data(existing_data, tmp_path)
+
+ # drop the user into an editor on the tmp file
+ try:
+ call(self._editor_shell_command(tmp_path))
+ except OSError, e:
+ raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e)))
+ tmpdata = self.read_data(tmp_path)
+
+ # create new vault
+ this_vault = VaultLib(self.password)
+ if cipher:
+ this_vault.cipher_name = cipher
+
+ # encrypt new data and write out to tmp
+ enc_data = this_vault.encrypt(tmpdata)
+ self.write_data(enc_data, tmp_path)
+
+ # shuffle tmp file into place
+ self.shuffle_files(tmp_path, self.filename)
+
+ # and restore umask
+ os.umask(old_umask)
+
+ def create_file(self):
+ """ create a new encrypted file """
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
+
+ # Let the user specify contents and save file
+ self._edit_file_helper(cipher=self.cipher_name)
+
+ def decrypt_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if not os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s does not exist" % self.filename)
+
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ if this_vault.is_encrypted(tmpdata):
+ dec_data = this_vault.decrypt(tmpdata)
+ if dec_data is None:
+ raise errors.AnsibleError("Decryption failed")
+ else:
+ self.write_data(dec_data, self.filename)
+ else:
+ raise errors.AnsibleError("%s is not encrypted" % self.filename)
+
+ def edit_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt to tmpfile
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+
+ # let the user edit the data and save
+ self._edit_file_helper(existing_data=dec_data)
+ ###we want the cipher to default to AES256 (get rid of files
+ # encrypted with the AES cipher)
+ #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
+
+
+ def view_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt to tmpfile
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+ old_umask = os.umask(0o077)
+ _, tmp_path = tempfile.mkstemp()
+ self.write_data(dec_data, tmp_path)
+ os.umask(old_umask)
+
+ # drop the user into pager on the tmp file
+ call(self._pager_shell_command(tmp_path))
+ os.remove(tmp_path)
+
+ def encrypt_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if not os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s does not exist" % self.filename)
+
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ this_vault.cipher_name = self.cipher_name
+ if not this_vault.is_encrypted(tmpdata):
+ enc_data = this_vault.encrypt(tmpdata)
+ self.write_data(enc_data, self.filename)
+ else:
+ raise errors.AnsibleError("%s is already encrypted" % self.filename)
+
+ def rekey_file(self, new_password):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+
+ # create new vault
+ new_vault = VaultLib(new_password)
+
+ # we want to force cipher to the default
+ #new_vault.cipher_name = this_vault.cipher_name
+
+ # re-encrypt data and re-write file
+ enc_data = new_vault.encrypt(dec_data)
+ self.write_data(enc_data, self.filename)
+
+ def read_data(self, filename):
+ f = open(filename, "rb")
+ tmpdata = f.read()
+ f.close()
+ return tmpdata
+
+ def write_data(self, data, filename):
+ if os.path.isfile(filename):
+ os.remove(filename)
+ f = open(filename, "wb")
+ f.write(data)
+ f.close()
+
+ def shuffle_files(self, src, dest):
+ # overwrite dest with src
+ if os.path.isfile(dest):
+ os.remove(dest)
+ shutil.move(src, dest)
+
+ def _editor_shell_command(self, filename):
+ EDITOR = os.environ.get('EDITOR','vim')
+ editor = shlex.split(EDITOR)
+ editor.append(filename)
+
+ return editor
+
+ def _pager_shell_command(self, filename):
+ PAGER = os.environ.get('PAGER','less')
+ pager = shlex.split(PAGER)
+ pager.append(filename)
+
+ return pager
+
+########################################
+# CIPHERS #
+########################################
+
+class VaultAES(object):
+
+ # this version has been obsoleted by the VaultAES256 class
+ # which uses encrypt-then-mac (fixing order) and also improving the KDF used
+ # code remains for upgrade purposes only
+ # http://stackoverflow.com/a/16761459
+
+ def __init__(self):
+ if not md5:
+ raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.')
+ if not HAS_AES:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
+
+ """ Create a key and an initialization vector """
+
+ d = d_i = ''
+ while len(d) < key_length + iv_length:
+ d_i = md5(d_i + password + salt).digest()
+ d += d_i
+
+ key = d[:key_length]
+ iv = d[key_length:key_length+iv_length]
+
+ return key, iv
+
+ def encrypt(self, data, password, key_length=32):
+
+ """ Read plaintext data from in_file and write encrypted to out_file """
+
+
+ # combine sha + data
+ this_sha = sha256(data).hexdigest()
+ tmp_data = this_sha + "\n" + data
+
+ in_file = BytesIO(tmp_data)
+ in_file.seek(0)
+ out_file = BytesIO()
+
+ bs = AES.block_size
+
+ # Get a block of random data. EL does not have Crypto.Random.new()
+ # so os.urandom is used for cross platform purposes
+ salt = os.urandom(bs - len('Salted__'))
+
+ key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ out_file.write('Salted__' + salt)
+ finished = False
+ while not finished:
+ chunk = in_file.read(1024 * bs)
+ if len(chunk) == 0 or len(chunk) % bs != 0:
+ padding_length = (bs - len(chunk) % bs) or bs
+ chunk += padding_length * chr(padding_length)
+ finished = True
+ out_file.write(cipher.encrypt(chunk))
+
+ out_file.seek(0)
+ enc_data = out_file.read()
+ tmp_data = hexlify(enc_data)
+
+ return tmp_data
+
+
+ def decrypt(self, data, password, key_length=32):
+
+ """ Read encrypted data from in_file and write decrypted to out_file """
+
+ # http://stackoverflow.com/a/14989032
+
+ data = ''.join(data.split('\n'))
+ data = unhexlify(data)
+
+ in_file = BytesIO(data)
+ in_file.seek(0)
+ out_file = BytesIO()
+
+ bs = AES.block_size
+ salt = in_file.read(bs)[len('Salted__'):]
+ key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
+ cipher = AES.new(key, AES.MODE_CBC, iv)
+ next_chunk = ''
+ finished = False
+
+ while not finished:
+ chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
+ if len(next_chunk) == 0:
+ padding_length = ord(chunk[-1])
+ chunk = chunk[:-padding_length]
+ finished = True
+ out_file.write(chunk)
+
+ # reset the stream pointer to the beginning
+ out_file.seek(0)
+ new_data = out_file.read()
+
+ # split out sha and verify decryption
+ split_data = new_data.split("\n")
+ this_sha = split_data[0]
+ this_data = '\n'.join(split_data[1:])
+ test_sha = sha256(this_data).hexdigest()
+
+ if this_sha != test_sha:
+ raise errors.AnsibleError("Decryption failed")
+
+ #return out_file.read()
+ return this_data
+
+
+class VaultAES256(object):
+
+ """
+ Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
+ Keys are derived using PBKDF2
+ """
+
+ # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
+
+ def __init__(self):
+
+ if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ def gen_key_initctr(self, password, salt):
+ # 16 for AES 128, 32 for AES256
+ keylength = 32
+
+ # match the size used for counter.new to avoid extra work
+ ivlength = 16
+
+ hash_function = SHA256
+
+ # make two keys and one iv
+ pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
+
+
+ derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
+ count=10000, prf=pbkdf2_prf)
+
+ key1 = derivedkey[:keylength]
+ key2 = derivedkey[keylength:(keylength * 2)]
+ iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
+
+ return key1, key2, hexlify(iv)
+
+
+ def encrypt(self, data, password):
+
+ salt = os.urandom(32)
+ key1, key2, iv = self.gen_key_initctr(password, salt)
+
+ # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
+ bs = AES.block_size
+ padding_length = (bs - len(data) % bs) or bs
+ data += padding_length * chr(padding_length)
+
+ # COUNTER.new PARAMETERS
+ # 1) nbits (integer) - Length of the counter, in bits.
+ # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
+
+ ctr = Counter.new(128, initial_value=long(iv, 16))
+
+ # AES.new PARAMETERS
+ # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
+ # 2) MODE_CTR, is the recommended mode
+ # 3) counter=
+
+ cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
+
+ # ENCRYPT PADDED DATA
+ cryptedData = cipher.encrypt(data)
+
+ # COMBINE SALT, DIGEST AND DATA
+ hmac = HMAC.new(key2, cryptedData, SHA256)
+ message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
+ message = hexlify(message)
+ return message
+
+ def decrypt(self, data, password):
+
+ # SPLIT SALT, DIGEST, AND DATA
+ data = ''.join(data.split("\n"))
+ data = unhexlify(data)
+ salt, cryptedHmac, cryptedData = data.split("\n", 2)
+ salt = unhexlify(salt)
+ cryptedData = unhexlify(cryptedData)
+
+ key1, key2, iv = self.gen_key_initctr(password, salt)
+
+ # EXIT EARLY IF DIGEST DOESN'T MATCH
+ hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
+ if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
+ return None
+
+ # SET THE COUNTER AND THE CIPHER
+ ctr = Counter.new(128, initial_value=long(iv, 16))
+ cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
+
+ # DECRYPT PADDED DATA
+ decryptedData = cipher.decrypt(cryptedData)
+
+ # UNPAD DATA
+ padding_length = ord(decryptedData[-1])
+ decryptedData = decryptedData[:-padding_length]
+
+ return decryptedData
+
+ def is_equal(self, a, b):
+ # http://codahale.com/a-lesson-in-timing-attacks/
+ if len(a) != len(b):
+ return False
+
+ result = 0
+ for x, y in zip(a, b):
+ result |= ord(x) ^ ord(y)
+ return result == 0
+
+
diff --git a/v1/bin/ansible b/v1/bin/ansible
new file mode 100755
index 00000000000..7fec34ec81e
--- /dev/null
+++ b/v1/bin/ansible
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+
+# (c) 2012, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+########################################################
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import os
+import sys
+
+from ansible.runner import Runner
+import ansible.constants as C
+from ansible import utils
+from ansible import errors
+from ansible import callbacks
+from ansible import inventory
+########################################################
+
+class Cli(object):
+ ''' code behind bin/ansible '''
+
+ # ----------------------------------------------
+
+ def __init__(self):
+ self.stats = callbacks.AggregateStats()
+ self.callbacks = callbacks.CliRunnerCallbacks()
+ if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
+ callbacks.load_callback_plugins()
+
+ # ----------------------------------------------
+
+ def parse(self):
+ ''' create an options parser for bin/ansible '''
+
+ parser = utils.base_parser(
+ constants=C,
+ runas_opts=True,
+ subset_opts=True,
+ async_opts=True,
+ output_opts=True,
+ connect_opts=True,
+ check_opts=True,
+ diff_opts=False,
+ usage='%prog [options]'
+ )
+
+ parser.add_option('-a', '--args', dest='module_args',
+ help="module arguments", default=C.DEFAULT_MODULE_ARGS)
+ parser.add_option('-m', '--module-name', dest='module_name',
+ help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ default=C.DEFAULT_MODULE_NAME)
+
+ options, args = parser.parse_args()
+ self.callbacks.options = options
+
+ if len(args) == 0 or len(args) > 1:
+ parser.print_help()
+ sys.exit(1)
+
+        # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
+
+ if (options.ask_vault_pass and options.vault_password_file):
+ parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+
+ return (options, args)
+
+ # ----------------------------------------------
+
+ def run(self, options, args):
+ ''' use Runner lib to do SSH things '''
+
+ pattern = args[0]
+
+ sshpass = becomepass = vault_pass = become_method = None
+
+ # Never ask for an SSH password when we run with local connection
+ if options.connection == "local":
+ options.ask_pass = False
+ else:
+ options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
+
+ options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
+
+ # become
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
+
+ # read vault_pass from a file
+ if not options.ask_vault_pass and options.vault_password_file:
+ vault_pass = utils.read_vault_file(options.vault_password_file)
+
+ extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
+
+ inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
+ if options.subset:
+ inventory_manager.subset(options.subset)
+ hosts = inventory_manager.list_hosts(pattern)
+
+ if len(hosts) == 0:
+ callbacks.display("No hosts matched", stderr=True)
+ sys.exit(0)
+
+ if options.listhosts:
+ for host in hosts:
+ callbacks.display(' %s' % host)
+ sys.exit(0)
+
+ if options.module_name in ['command','shell'] and not options.module_args:
+ callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
+ sys.exit(1)
+
+ if options.tree:
+ utils.prepare_writeable_dir(options.tree)
+
+ runner = Runner(
+ module_name=options.module_name,
+ module_path=options.module_path,
+ module_args=options.module_args,
+ remote_user=options.remote_user,
+ remote_pass=sshpass,
+ inventory=inventory_manager,
+ timeout=options.timeout,
+ private_key_file=options.private_key_file,
+ forks=options.forks,
+ pattern=pattern,
+ callbacks=self.callbacks,
+ transport=options.connection,
+ subset=options.subset,
+ check=options.check,
+ diff=options.check,
+ vault_pass=vault_pass,
+ become=options.become,
+ become_method=options.become_method,
+ become_pass=becomepass,
+ become_user=options.become_user,
+ extra_vars=extra_vars,
+ )
+
+ if options.seconds:
+ callbacks.display("background launch...\n\n", color='cyan')
+ results, poller = runner.run_async(options.seconds)
+ results = self.poll_while_needed(poller, options)
+ else:
+ results = runner.run()
+
+ return (runner, results)
+
+ # ----------------------------------------------
+
+ def poll_while_needed(self, poller, options):
+ ''' summarize results from Runner '''
+
+ # BACKGROUND POLL LOGIC when -B and -P are specified
+ if options.seconds and options.poll_interval > 0:
+ poller.wait(options.seconds, options.poll_interval)
+
+ return poller.results
+
+
+########################################################
+
+if __name__ == '__main__':
+ callbacks.display("", log_only=True)
+ callbacks.display(" ".join(sys.argv), log_only=True)
+ callbacks.display("", log_only=True)
+
+ cli = Cli()
+ (options, args) = cli.parse()
+ try:
+ (runner, results) = cli.run(options, args)
+ for result in results['contacted'].values():
+ if 'failed' in result or result.get('rc', 0) != 0:
+ sys.exit(2)
+ if results['dark']:
+ sys.exit(3)
+ except errors.AnsibleError, e:
+ # Generic handler for ansible specific errors
+ callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
+ sys.exit(1)
+
diff --git a/v1/bin/ansible-doc b/v1/bin/ansible-doc
new file mode 100755
index 00000000000..dff7cecce79
--- /dev/null
+++ b/v1/bin/ansible-doc
@@ -0,0 +1,337 @@
+#!/usr/bin/env python
+
+# (c) 2012, Jan-Piet Mens
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+import os
+import sys
+import textwrap
+import re
+import optparse
+import datetime
+import subprocess
+import fcntl
+import termios
+import struct
+
+from ansible import utils
+from ansible.utils import module_docs
+import ansible.constants as C
+from ansible.utils import version
+import traceback
+
+MODULEDIR = C.DEFAULT_MODULE_PATH
+
+BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
+IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
+
+_ITALIC = re.compile(r"I\(([^)]+)\)")
+_BOLD = re.compile(r"B\(([^)]+)\)")
+_MODULE = re.compile(r"M\(([^)]+)\)")
+_URL = re.compile(r"U\(([^)]+)\)")
+_CONST = re.compile(r"C\(([^)]+)\)")
+PAGER = 'less'
+LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
+ # -S (chop long lines) -X (disable termcap init and de-init)
+
+def pager_print(text):
+ ''' just print text '''
+ print text
+
+def pager_pipe(text, cmd):
+ ''' pipe text through a pager '''
+ if 'LESS' not in os.environ:
+ os.environ['LESS'] = LESS_OPTS
+ try:
+ cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
+ cmd.communicate(input=text)
+ except IOError:
+ pass
+ except KeyboardInterrupt:
+ pass
+
+def pager(text):
+ ''' find reasonable way to display text '''
+ # this is a much simpler form of what is in pydoc.py
+ if not sys.stdout.isatty():
+ pager_print(text)
+ elif 'PAGER' in os.environ:
+ if sys.platform == 'win32':
+ pager_print(text)
+ else:
+ pager_pipe(text, os.environ['PAGER'])
+ elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
+ pager_pipe(text, 'less')
+ else:
+ pager_print(text)
+
+def tty_ify(text):
+
+ t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
+ t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
+ t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
+ t = _URL.sub(r"\1", t) # U(word) => word
+ t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
+
+ return t
+
+def get_man_text(doc):
+
+ opt_indent=" "
+ text = []
+ text.append("> %s\n" % doc['module'].upper())
+
+ desc = " ".join(doc['description'])
+
+ text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
+
+ if 'option_keys' in doc and len(doc['option_keys']) > 0:
+ text.append("Options (= is mandatory):\n")
+
+ for o in sorted(doc['option_keys']):
+ opt = doc['options'][o]
+
+ if opt.get('required', False):
+ opt_leadin = "="
+ else:
+ opt_leadin = "-"
+
+ text.append("%s %s" % (opt_leadin, o))
+
+ desc = " ".join(opt['description'])
+
+ if 'choices' in opt:
+ choices = ", ".join(str(i) for i in opt['choices'])
+ desc = desc + " (Choices: " + choices + ")"
+ if 'default' in opt:
+ default = str(opt['default'])
+ desc = desc + " [Default: " + default + "]"
+ text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+
+ if 'notes' in doc and len(doc['notes']) > 0:
+ notes = " ".join(doc['notes'])
+ text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
+ subsequent_indent=opt_indent))
+
+
+ if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
+ req = ", ".join(doc['requirements'])
+ text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
+ subsequent_indent=opt_indent))
+
+ if 'examples' in doc and len(doc['examples']) > 0:
+ text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
+ for ex in doc['examples']:
+ text.append("%s\n" % (ex['code']))
+
+ if 'plainexamples' in doc and doc['plainexamples'] is not None:
+ text.append("EXAMPLES:")
+ text.append(doc['plainexamples'])
+ if 'returndocs' in doc and doc['returndocs'] is not None:
+ text.append("RETURN VALUES:")
+ text.append(doc['returndocs'])
+ text.append('')
+
+ return "\n".join(text)
+
+
+def get_snippet_text(doc):
+
+ text = []
+ desc = tty_ify(" ".join(doc['short_description']))
+ text.append("- name: %s" % (desc))
+ text.append(" action: %s" % (doc['module']))
+
+ for o in sorted(doc['options'].keys()):
+ opt = doc['options'][o]
+ desc = tty_ify(" ".join(opt['description']))
+
+ if opt.get('required', False):
+ s = o + "="
+ else:
+ s = o
+
+ text.append(" %-20s # %s" % (s, desc))
+ text.append('')
+
+ return "\n".join(text)
+
+def get_module_list_text(module_list):
+ tty_size = 0
+ if os.isatty(0):
+ tty_size = struct.unpack('HHHH',
+ fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
+ columns = max(60, tty_size)
+ displace = max(len(x) for x in module_list)
+ linelimit = columns - displace - 5
+ text = []
+ deprecated = []
+ for module in sorted(set(module_list)):
+
+ if module in module_docs.BLACKLIST_MODULES:
+ continue
+
+ filename = utils.plugins.module_finder.find_plugin(module)
+
+ if filename is None:
+ continue
+ if filename.endswith(".ps1"):
+ continue
+ if os.path.isdir(filename):
+ continue
+
+ try:
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ desc = tty_ify(doc.get('short_description', '?')).strip()
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+
+ if module.startswith('_'): # Handle deprecated
+ deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
+ else:
+ text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
+ except:
+ traceback.print_exc()
+ sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
+
+ if len(deprecated) > 0:
+ text.append("\nDEPRECATED:")
+ text.extend(deprecated)
+ return "\n".join(text)
+
+def find_modules(path, module_list):
+
+ if os.path.isdir(path):
+ for module in os.listdir(path):
+ if module.startswith('.'):
+ continue
+ elif os.path.isdir(module):
+ find_modules(module, module_list)
+ elif any(module.endswith(x) for x in BLACKLIST_EXTS):
+ continue
+ elif module.startswith('__'):
+ continue
+ elif module in IGNORE_FILES:
+ continue
+ elif module.startswith('_'):
+ fullpath = '/'.join([path,module])
+ if os.path.islink(fullpath): # avoids aliases
+ continue
+
+ module = os.path.splitext(module)[0] # removes the extension
+ module_list.append(module)
+
+def main():
+
+ p = optparse.OptionParser(
+ version=version("%prog"),
+ usage='usage: %prog [options] [module...]',
+ description='Show Ansible module documentation',
+ )
+
+ p.add_option("-M", "--module-path",
+ action="store",
+ dest="module_path",
+ default=MODULEDIR,
+ help="Ansible modules/ directory")
+ p.add_option("-l", "--list",
+ action="store_true",
+ default=False,
+ dest='list_dir',
+ help='List available modules')
+ p.add_option("-s", "--snippet",
+ action="store_true",
+ default=False,
+ dest='show_snippet',
+ help='Show playbook snippet for specified module(s)')
+ p.add_option('-v', action='version', help='Show version number and exit')
+
+ (options, args) = p.parse_args()
+
+ if options.module_path is not None:
+ for i in options.module_path.split(os.pathsep):
+ utils.plugins.module_finder.add_directory(i)
+
+ if options.list_dir:
+ # list modules
+ paths = utils.plugins.module_finder._get_paths()
+ module_list = []
+ for path in paths:
+ find_modules(path, module_list)
+
+ pager(get_module_list_text(module_list))
+ sys.exit()
+
+ if len(args) == 0:
+ p.print_help()
+
+ def print_paths(finder):
+ ''' Returns a string suitable for printing of the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths():
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ text = ''
+ for module in args:
+
+ filename = utils.plugins.module_finder.find_plugin(module)
+ if filename is None:
+ sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
+ continue
+
+ if any(filename.endswith(x) for x in BLACKLIST_EXTS):
+ continue
+
+ try:
+ doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+ except:
+ traceback.print_exc()
+ sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
+ continue
+
+ if doc is not None:
+
+ all_keys = []
+ for (k,v) in doc['options'].iteritems():
+ all_keys.append(k)
+ all_keys = sorted(all_keys)
+ doc['option_keys'] = all_keys
+
+ doc['filename'] = filename
+ doc['docuri'] = doc['module'].replace('_', '-')
+ doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
+
+ if options.show_snippet:
+ text += get_snippet_text(doc)
+ else:
+ text += get_man_text(doc)
+ else:
+ # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
+ # probably a quoting issue.
+ sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
+ pager(text)
+
+if __name__ == '__main__':
+ main()
diff --git a/v1/bin/ansible-galaxy b/v1/bin/ansible-galaxy
new file mode 100755
index 00000000000..a6d625671ec
--- /dev/null
+++ b/v1/bin/ansible-galaxy
@@ -0,0 +1,957 @@
+#!/usr/bin/env python
+
+########################################################################
+#
+# (C) 2013, James Cammarata
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+########################################################################
+
+import datetime
+import json
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+import urllib
+import urllib2
+import yaml
+
+from collections import defaultdict
+from distutils.version import LooseVersion
+from jinja2 import Environment
+from optparse import OptionParser
+
+import ansible.constants as C
+import ansible.utils
+from ansible.errors import AnsibleError
+
# Jinja2 template rendered by execute_init() into a new role's meta/main.yml.
# Platform and category lists fetched from the Galaxy API are injected as
# commented-out suggestions for the role author to uncomment.
default_meta_template = """---
galaxy_info:
  author: {{ author }}
  description: {{description}}
  company: {{ company }}
  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: {{ issue_tracker_url }}
  # Some suggested licenses:
  # - BSD (default)
  # - MIT
  # - GPLv2
  # - GPLv3
  # - Apache
  # - CC-BY
  license: {{ license }}
  min_ansible_version: {{ min_ansible_version }}
  #
  # Below are all platforms currently available. Just uncomment
  # the ones that apply to your role. If you don't see your
  # platform on this list, let us know and we'll get it added!
  #
  #platforms:
  {%- for platform,versions in platforms.iteritems() %}
  #- name: {{ platform }}
  #  versions:
  #  - all
  {%- for version in versions %}
  #  - {{ version }}
  {%- endfor %}
  {%- endfor %}
  #
  # Below are all categories currently available. Just as with
  # the platforms above, uncomment those that apply to your role.
  #
  #categories:
  {%- for category in categories %}
  #- {{ category.name }}
  {%- endfor %}
dependencies: []
  # List your role dependencies here, one per line.
  # Be sure to remove the '[]' above if you add dependencies
  # to this list.
  {% for dependency in dependencies %}
  #- {{ dependency }}
  {% endfor %}

"""
+
# Skeleton README.md written verbatim into a freshly initialized role.
default_readme_template = """Role Name
=========

A brief description of the role goes here.

Requirements
------------

Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.

Role Variables
--------------

A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.

Dependencies
------------

A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.

Example Playbook
----------------

Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:

    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
"""
+
+#-------------------------------------------------------------------------------------
+# Utility functions for parsing actions/options
+#-------------------------------------------------------------------------------------
+
+VALID_ACTIONS = ("init", "info", "install", "list", "remove")
+SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
+
def get_action(args):
    """
    Scan *args* for the first recognized sub-command, remove it from the
    list in place, and return it.

    Returns None when no valid action is present.
    """
    for idx, candidate in enumerate(args):
        if candidate in VALID_ACTIONS:
            del args[idx]
            return candidate
    return None
+
def build_option_parser(action):
    """
    Build the OptionParser for the given sub-command *action*.

    Prints the generic help and exits when no recognized action was
    supplied.  Options are added per action, with a few shared across
    several actions.
    """

    usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
    epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    # optparse re-wraps the epilog by default; override the class method so
    # the embedded newlines survive verbatim.
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage=usage, epilog=epilog)

    if not action:
        parser.print_help()
        sys.exit()

    # options for all actions
    # - none yet

    # options specific to actions
    if action == "info":
        parser.set_usage("usage: %prog info [options] role_name[,version]")
    elif action == "init":
        parser.set_usage("usage: %prog init [options] role_name")
        parser.add_option(
            '-p', '--init-path', dest='init_path', default="./",
            help='The path in which the skeleton role will be created. '
                 'The default is the current working directory.')
        parser.add_option(
            '--offline', dest='offline', default=False, action='store_true',
            help="Don't query the galaxy API when creating roles")
    elif action == "install":
        parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
        parser.add_option(
            '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
            help='Ignore errors and continue with the next specified role.')
        parser.add_option(
            '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
            help='Don\'t download roles listed as dependencies')
        parser.add_option(
            '-r', '--role-file', dest='role_file',
            help='A file containing a list of roles to be imported')
    elif action == "remove":
        parser.set_usage("usage: %prog remove role1 role2 ...")
    elif action == "list":
        parser.set_usage("usage: %prog list [role_name]")

    # options that apply to more than one action
    if action != "init":
        parser.add_option(
            '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
            help='The path to the directory containing your roles. '
                 'The default is the roles_path configured in your '
                 'ansible.cfg file (/etc/ansible/roles if not configured)')

    if action in ("info","init","install"):
        parser.add_option(
            '-s', '--server', dest='api_server', default="galaxy.ansible.com",
            help='The API server destination')

    if action in ("init","install"):
        parser.add_option(
            '-f', '--force', dest='force', action='store_true', default=False,
            help='Force overwriting an existing role')
    # done, return the parser
    return parser
+
def get_opt(options, k, defval=""):
    """
    Return attribute *k* from an optparse Values instance *options*,
    or *defval* when the attribute is absent.

    For 'roles_path' only the first entry of a pathsep-separated list
    is returned, since this v1 tool supports a single roles directory.
    """
    # Narrowed from a bare 'except:' -- only a missing attribute should
    # fall back to the default; any other error should surface.
    try:
        data = getattr(options, k)
    except AttributeError:
        return defval
    if k == "roles_path":
        if os.pathsep in data:
            data = data.split(os.pathsep)[0]
    return data
+
def exit_without_ignore(options, rc=1):
    """
    Exit with return code *rc* unless the user passed --ignore-errors,
    in which case control returns to the caller so the next role can be
    processed.
    """

    if not get_opt(options, "ignore_errors", False):
        print '- you can use --ignore-errors to skip failed roles.'
        sys.exit(rc)
+
+
+#-------------------------------------------------------------------------------------
+# Galaxy API functions
+#-------------------------------------------------------------------------------------
+
def api_get_config(api_server):
    """
    Fetch the Galaxy API root document to verify the server is up and
    reachable.

    Returns the parsed JSON dict when it reports a 'current_version',
    otherwise None (including on any network or parse error).
    """

    try:
        url = 'https://%s/api/' % api_server
        data = json.load(urllib2.urlopen(url))
        if not data.get("current_version",None):
            return None
        else:
            return data
    except:
        # any failure (DNS, TLS, HTTP, JSON) is reported as "server down"
        return None
+
def api_lookup_role_by_name(api_server, role_name, notify=True):
    """
    Look up a role given as 'username.rolename' via the Galaxy API.

    Returns the first matching result dict, or None when the role does
    not exist or the API call fails.
    """

    role_name = urllib.quote(role_name)

    try:
        parts = role_name.split(".")
        user_name = ".".join(parts[0:-1])
        role_name = parts[-1]
        if notify:
            print "- downloading role '%s', owned by %s" % (role_name, user_name)
    except:
        # NOTE(review): 'parser' is not defined in this scope -- if this
        # handler ever ran it would raise NameError.  In practice
        # str.split/str.join cannot fail here, so this branch is dead code.
        parser.print_help()
        print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
        sys.exit(1)

    url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
    try:
        data = json.load(urllib2.urlopen(url))
        if len(data["results"]) == 0:
            return None
        else:
            return data["results"][0]
    except:
        return None
+
def api_fetch_role_related(api_server, related, role_id):
    """
    Fetch the full (paginated) list of *related* items for a role, e.g.
    'versions' or 'dependencies'.

    Returns the combined results list, or None on any error.
    """

    try:
        url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
        data = json.load(urllib2.urlopen(url))
        results = data['results']
        done = (data.get('next', None) == None)
        # follow the API's 'next' links until the last page is consumed
        while not done:
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        return None
+
def api_get_list(api_server, what):
    """
    Fetch the complete (paginated) list of *what* -- e.g. 'platforms'
    or 'categories' -- from the Galaxy API.

    Returns the list, or None (after printing a notice) on failure.
    """

    try:
        # NOTE(review): '?page_size' carries no value here (compare the
        # '?page_size=50' used in api_fetch_role_related) -- presumably a
        # larger page size was intended; verify against the API.
        url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
        data = json.load(urllib2.urlopen(url))
        # some endpoints return a bare list rather than a paginated wrapper
        if "results" in data:
            results = data['results']
        else:
            results = data
        done = True
        if "next" in data:
            done = (data.get('next', None) == None)
        while not done:
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        print "- failed to download the %s list" % what
        return None
+
+#-------------------------------------------------------------------------------------
+# scm repo utility functions
+#-------------------------------------------------------------------------------------
+
def scm_archive_role(scm, role_url, role_version, role_name):
    """
    Clone a role repository with git or hg into a temp directory and
    produce a tar archive of it (at *role_version* when given, else
    HEAD/tip).

    Returns the archive's temp-file path, or False on failure.  Raises
    AnsibleError when the scm binary cannot be executed at all.
    """
    if scm not in ['hg', 'git']:
        print "- scm %s is not currently supported" % scm
        return False
    tempdir = tempfile.mkdtemp()
    clone_cmd = [scm, 'clone', role_url, role_name]
    with open('/dev/null', 'w') as devnull:
        try:
            print "- executing: %s" % " ".join(clone_cmd)
            popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
        except:
            raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
        rc = popen.wait()
        if rc != 0:
            print "- command %s failed" % ' '.join(clone_cmd)
            print " in directory %s" % tempdir
            return False

    # delete=False: the caller is responsible for unlinking the archive
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
    if scm == 'hg':
        archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
        if role_version:
            archive_cmd.extend(['-r', role_version])
        archive_cmd.append(temp_file.name)
    if scm == 'git':
        archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
        if role_version:
            archive_cmd.append(role_version)
        else:
            archive_cmd.append('HEAD')

    with open('/dev/null', 'w') as devnull:
        print "- executing: %s" % " ".join(archive_cmd)
        popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
            stderr=devnull, stdout=devnull)
        rc = popen.wait()
        if rc != 0:
            print "- command %s failed" % ' '.join(archive_cmd)
            print " in directory %s" % tempdir
            return False

    # the clone is no longer needed once the archive exists
    shutil.rmtree(tempdir, ignore_errors=True)

    return temp_file.name
+
+
+#-------------------------------------------------------------------------------------
+# Role utility functions
+#-------------------------------------------------------------------------------------
+
def get_role_path(role_name, options):
    """
    Build the on-disk path for *role_name* by joining it onto the
    configured roles_path and expanding any leading '~'.
    """
    base = get_opt(options, 'roles_path')
    return os.path.expanduser(os.path.join(base, role_name))
+
def get_role_metadata(role_name, options):
    """
    Load and return the role's meta/main.yml as a Python object, or
    None when the file is missing or unreadable.
    """
    role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
    try:
        if not os.path.isfile(role_path):
            return None
        # 'with' guarantees the handle is closed even when YAML parsing
        # raises (the original leaked the handle on a parse error).
        with open(role_path, 'r') as f:
            return yaml.safe_load(f)
    except Exception:
        # any read/parse problem is treated as "no metadata"
        return None
+
def get_galaxy_install_info(role_name, options):
    """
    Return the YAML data stored in the role's meta/.galaxy_install_info
    (written at install time by write_galaxy_install_info), or None
    when absent or unreadable.
    """

    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        if not os.path.isfile(info_path):
            return None
        # 'with' closes the handle even when yaml.safe_load raises
        # (the original leaked the file object in that case).
        with open(info_path, 'r') as f:
            return yaml.safe_load(f)
    except Exception:
        return None
+
def write_galaxy_install_info(role_name, role_version, options):
    """
    Write meta/.galaxy_install_info (installed version + install date)
    inside the role directory, for later use by 'list' and 'info'.

    Returns True on success, False on any failure.
    """

    info = dict(
        version = role_version,
        install_date = datetime.datetime.utcnow().strftime("%c"),
    )
    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        # 'with' replaces the open/close pair and guarantees the handle
        # is released if safe_dump raises mid-write; the unused
        # 'info_data' local from the original is dropped.
        with open(info_path, 'w+') as f:
            yaml.safe_dump(info, f)
    except Exception:
        return False
    return True
+
+
def remove_role(role_name, options):
    """
    Delete an installed role's directory tree.

    As a sanity check, a role is only removed when a meta/main.yml can
    be loaded from it, so arbitrary directories under roles_path are
    never blown away.  Returns True when removed, False otherwise.
    """
    if not get_role_metadata(role_name, options):
        return False
    shutil.rmtree(get_role_path(role_name, options))
    return True
+
def fetch_role(role_name, target, role_data, options):
    """
    Download the role archive to a temp file -- from an arbitrary URL
    when *role_name* already contains '://', otherwise from github using
    *role_data* and the *target* version/branch.

    Returns the temp file's path, or False on download failure.
    """

    # first grab the file and save it to a temp location
    if '://' in role_name:
        archive_url = role_name
    else:
        archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
    print "- downloading role from %s" % archive_url

    try:
        url_file = urllib2.urlopen(archive_url)
        # delete=False: the caller unlinks the file after install_role()
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        data = url_file.read()
        while data:
            temp_file.write(data)
            data = url_file.read()
        temp_file.close()
        return temp_file.name
    except Exception, e:
        # TODO: better urllib2 error handling for error
        # messages that are more exact
        print "- error: failed to download the file."
        return False
+
def install_role(role_name, role_version, role_filename, options):
    """
    Extract a downloaded role tarball into the roles path under
    *role_name*, stripping the archive's top-level directory and any
    unsafe path components, then record the install info.

    Returns the role's parsed meta/main.yml data on success, or False
    on any failure.
    """
    # the file is a tar, so open it that way and extract it
    # to the specified (or default) roles directory

    if not tarfile.is_tarfile(role_filename):
        print "- error: the file downloaded was not a tar.gz"
        return False
    else:
        if role_filename.endswith('.gz'):
            role_tar_file = tarfile.open(role_filename, "r:gz")
        else:
            role_tar_file = tarfile.open(role_filename, "r")
        # verify the role's meta file
        meta_file = None
        members = role_tar_file.getmembers()
        # next find the metadata file
        for member in members:
            if "/meta/main.yml" in member.name:
                meta_file = member
                break
        if not meta_file:
            print "- error: this role does not appear to have a meta/main.yml file."
            return False
        else:
            try:
                meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
            except:
                print "- error: this role does not appear to have a valid meta/main.yml file."
                return False

        # we strip off the top-level directory for all of the files contained within
        # the tar file here, since the default is 'github_repo-target', and change it
        # to the specified role's name
        role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
        role_path = os.path.expanduser(role_path)
        print "- extracting %s to %s" % (role_name, role_path)
        try:
            if os.path.exists(role_path):
                if not os.path.isdir(role_path):
                    print "- error: the specified roles path exists and is not a directory."
                    return False
                elif not get_opt(options, "force", False):
                    print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
                    return False
                else:
                    # using --force, remove the old path
                    if not remove_role(role_name, options):
                        print "- error: %s doesn't appear to contain a role." % role_path
                        print " please remove this directory manually if you really want to put the role here."
                        return False
            else:
                os.makedirs(role_path)

            # now we do the actual extraction to the role_path
            for member in members:
                # we only extract files, and remove any relative path
                # bits that might be in the file for security purposes
                # and drop the leading directory, as mentioned above
                if member.isreg() or member.issym():
                    parts = member.name.split("/")[1:]
                    final_parts = []
                    for part in parts:
                        if part != '..' and '~' not in part and '$' not in part:
                            final_parts.append(part)
                    member.name = os.path.join(*final_parts)
                    role_tar_file.extract(member, role_path)

            # write out the install info file for later use
            write_galaxy_install_info(role_name, role_version, options)
        except OSError, e:
            print "- error: you do not have permission to modify files in %s" % role_path
            return False

        # return the parsed yaml metadata
        print "- %s was installed successfully" % role_name
        return meta_file_data
+
+#-------------------------------------------------------------------------------------
+# Action functions
+#-------------------------------------------------------------------------------------
+
+def execute_init(args, options, parser):
+ """
+ Executes the init action, which creates the skeleton framework
+ of a role that complies with the galaxy metadata format.
+ """
+
+ init_path = get_opt(options, 'init_path', './')
+ api_server = get_opt(options, "api_server", "galaxy.ansible.com")
+ force = get_opt(options, 'force', False)
+ offline = get_opt(options, 'offline', False)
+
+ if not offline:
+ api_config = api_get_config(api_server)
+ if not api_config:
+ print "- the API server (%s) is not responding, please try again later." % api_server
+ sys.exit(1)
+
+ try:
+ role_name = args.pop(0).strip()
+ if role_name == "":
+ raise Exception("")
+ role_path = os.path.join(init_path, role_name)
+ if os.path.exists(role_path):
+ if os.path.isfile(role_path):
+ print "- the path %s already exists, but is a file - aborting" % role_path
+ sys.exit(1)
+ elif not force:
+ print "- the directory %s already exists." % role_path
+ print " you can use --force to re-initialize this directory,\n" + \
+ " however it will reset any main.yml files that may have\n" + \
+ " been modified there already."
+ sys.exit(1)
+ except Exception, e:
+ parser.print_help()
+ print "- no role name specified for init"
+ sys.exit(1)
+
+ ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
+
+ # create the default README.md
+ if not os.path.exists(role_path):
+ os.makedirs(role_path)
+ readme_path = os.path.join(role_path, "README.md")
+ f = open(readme_path, "wb")
+ f.write(default_readme_template)
+ f.close
+
+ for dir in ROLE_DIRS:
+ dir_path = os.path.join(init_path, role_name, dir)
+ main_yml_path = os.path.join(dir_path, 'main.yml')
+ # create the directory if it doesn't exist already
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ # now create the main.yml file for that directory
+ if dir == "meta":
+ # create a skeleton meta/main.yml with a valid galaxy_info
+ # datastructure in place, plus with all of the available
+ # tags/platforms included (but commented out) and the
+ # dependencies section
+ platforms = []
+ if not offline:
+ platforms = api_get_list(api_server, "platforms") or []
+ categories = []
+ if not offline:
+ categories = api_get_list(api_server, "categories") or []
+
+ # group the list of platforms from the api based
+ # on their names, with the release field being
+ # appended to a list of versions
+ platform_groups = defaultdict(list)
+ for platform in platforms:
+ platform_groups[platform['name']].append(platform['release'])
+ platform_groups[platform['name']].sort()
+
+ inject = dict(
+ author = 'your name',
+ company = 'your company (optional)',
+ license = 'license (GPLv2, CC-BY, etc)',
+ issue_tracker_url = 'http://example.com/issue/tracker',
+ min_ansible_version = '1.2',
+ platforms = platform_groups,
+ categories = categories,
+ )
+ rendered_meta = Environment().from_string(default_meta_template).render(inject)
+ f = open(main_yml_path, 'w')
+ f.write(rendered_meta)
+ f.close()
+ pass
+ elif dir not in ('files','templates'):
+ # just write a (mostly) empty YAML file for main.yml
+ f = open(main_yml_path, 'w')
+ f.write('---\n# %s file for %s\n' % (dir,role_name))
+ f.close()
+ print "- %s was created successfully" % role_name
+
+def execute_info(args, options, parser):
+ """
+ Executes the info action. This action prints out detailed
+ information about an installed role as well as info available
+ from the galaxy API.
+ """
+
+ if len(args) == 0:
+ # the user needs to specify a role
+ parser.print_help()
+ print "- you must specify a user/role name"
+ sys.exit(1)
+
+ api_server = get_opt(options, "api_server", "galaxy.ansible.com")
+ api_config = api_get_config(api_server)
+ roles_path = get_opt(options, "roles_path")
+
+ for role in args:
+
+ role_info = {}
+
+ install_info = get_galaxy_install_info(role, options)
+ if install_info:
+ if 'version' in install_info:
+ install_info['intalled_version'] = install_info['version']
+ del install_info['version']
+ role_info.update(install_info)
+
+ remote_data = api_lookup_role_by_name(api_server, role, False)
+ if remote_data:
+ role_info.update(remote_data)
+
+ metadata = get_role_metadata(role, options)
+ if metadata:
+ role_info.update(metadata)
+
+ role_spec = ansible.utils.role_spec_parse(role)
+ if role_spec:
+ role_info.update(role_spec)
+
+ if role_info:
+ print "- %s:" % (role)
+ for k in sorted(role_info.keys()):
+
+ if k in SKIP_INFO_KEYS:
+ continue
+
+ if isinstance(role_info[k], dict):
+ print "\t%s: " % (k)
+ for key in sorted(role_info[k].keys()):
+ if key in SKIP_INFO_KEYS:
+ continue
+ print "\t\t%s: %s" % (key, role_info[k][key])
+ else:
+ print "\t%s: %s" % (k, role_info[k])
+ else:
+ print "- the role %s was not found" % role
+
def execute_install(args, options, parser):
    """
    Execute the install action.  Roles come from the command line or a
    roles file (-r, mutually exclusive with naming roles directly); a
    role source may be a Galaxy name, an scm+ URL, a plain URL, or a
    local tar file.  Dependencies are queued for install too unless
    --no-deps was given.
    """

    role_file = get_opt(options, "role_file", None)

    if len(args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file
        # or specify a single user/role name
        parser.print_help()
        print "- you must specify a user/role name or a roles file"
        sys.exit()
    elif len(args) == 1 and not role_file is None:
        # using a role file is mutually exclusive of specifying
        # the role name on the command line
        parser.print_help()
        print "- please specify a user/role name, or a roles file, but not both"
        sys.exit(1)

    api_server = get_opt(options, "api_server", "galaxy.ansible.com")
    no_deps    = get_opt(options, "no_deps", False)
    roles_path = get_opt(options, "roles_path")

    roles_done = []
    if role_file:
        f = open(role_file, 'r')
        if role_file.endswith('.yaml') or role_file.endswith('.yml'):
            roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
        else:
            # roles listed in a file, one per line
            roles_left = map(ansible.utils.role_spec_parse, f.readlines())
        f.close()
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        roles_left = map(ansible.utils.role_spec_parse, args)

    # work queue: dependencies discovered below are appended to roles_left
    while len(roles_left) > 0:
        # query the galaxy API for the role data
        role_data = None
        role = roles_left.pop(0)
        role_src  = role.get("src")
        role_scm  = role.get("scm")
        role_path = role.get("path")

        # a per-role 'path' overrides the global roles_path for this install
        if role_path:
            options.roles_path = role_path
        else:
            options.roles_path = roles_path

        if os.path.isfile(role_src):
            # installing a local tar.gz
            tmp_file = role_src
        else:
            if role_scm:
                # create tar file from scm url
                tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
            elif '://' in role_src:
                # just download a URL - version will probably be in the URL
                tmp_file = fetch_role(role_src, None, None, options)
            else:
                # installing from galaxy
                api_config = api_get_config(api_server)
                if not api_config:
                    print "- the API server (%s) is not responding, please try again later." % api_server
                    sys.exit(1)

                role_data = api_lookup_role_by_name(api_server, role_src)
                if not role_data:
                    print "- sorry, %s was not found on %s." % (role_src, api_server)
                    exit_without_ignore(options)
                    continue

                role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
                if "version" not in role or role['version'] == '':
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
                        loose_versions.sort()
                        role["version"] = str(loose_versions[-1])
                    else:
                        role["version"] = 'master'
                elif role['version'] != 'master':
                    if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
                        print 'role is %s' % role
                        print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
                        exit_without_ignore(options)
                        continue

                # download the role. if --no-deps was specified, we stop here,
                # otherwise we recursively grab roles and all of their deps.
                tmp_file = fetch_role(role_src, role["version"], role_data, options)
        installed = False
        if tmp_file:
            installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
            # we're done with the temp file, clean it up
            if tmp_file != role_src:
                os.unlink(tmp_file)
            # install dependencies, if we want them
            if not no_deps and installed:
                if not role_data:
                    role_data = get_role_metadata(role.get("name"), options)
                    role_dependencies = role_data['dependencies']
                else:
                    role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
                for dep in role_dependencies:
                    if isinstance(dep, basestring):
                        dep = ansible.utils.role_spec_parse(dep)
                    else:
                        dep = ansible.utils.role_yaml_parse(dep)
                    if not get_role_metadata(dep["name"], options):
                        if dep not in roles_left:
                            print '- adding dependency: %s' % dep["name"]
                            roles_left.append(dep)
                        else:
                            print '- dependency %s already pending installation.' % dep["name"]
                    else:
                        print '- dependency %s is already installed, skipping.' % dep["name"]
        if not tmp_file or not installed:
            print "- %s was NOT installed successfully." % role.get("name")
            exit_without_ignore(options)
    sys.exit(0)
+
def execute_remove(args, options, parser):
    """
    Execute the remove action: delete each role named in *args* from
    the roles path, skipping (with a notice) roles that are not
    installed.
    """

    if len(args) == 0:
        parser.print_help()
        print '- you must specify at least one role to remove.'
        sys.exit()

    for role in args:
        if get_role_metadata(role, options):
            if remove_role(role, options):
                print '- successfully removed %s' % role
            else:
                print "- failed to remove role: %s" % role
        else:
            print '- %s is not installed, skipping.' % role
    sys.exit(0)
+
def execute_list(args, options, parser):
    """
    Execute the list action.  With one argument, show just that role
    and its installed version; with none, list every valid role found
    directly under the configured roles_path.
    """

    if len(args) > 1:
        print "- please specify only one role to list, or specify no roles to see a full list"
        sys.exit(1)

    if len(args) == 1:
        # show only the request role, if it exists
        role_name = args[0]
        metadata = get_role_metadata(role_name, options)
        if metadata:
            install_info = get_galaxy_install_info(role_name, options)
            version = None
            if install_info:
                version = install_info.get("version", None)
            if not version:
                version = "(unknown version)"
            # show some more info about single roles here
            print "- %s, %s" % (role_name, version)
        else:
            print "- the role %s was not found" % role_name
    else:
        # show all valid roles in the roles_path directory
        roles_path = get_opt(options, 'roles_path')
        roles_path = os.path.expanduser(roles_path)
        if not os.path.exists(roles_path):
            parser.print_help()
            print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
            sys.exit(1)
        elif not os.path.isdir(roles_path):
            print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
            parser.print_help()
            sys.exit(1)
        path_files = os.listdir(roles_path)
        for path_file in path_files:
            # only directories with a loadable meta/main.yml count as roles
            if get_role_metadata(path_file, options):
                install_info = get_galaxy_install_info(path_file, options)
                version = None
                if install_info:
                    version = install_info.get("version", None)
                if not version:
                    version = "(unknown version)"
                print "- %s, %s" % (path_file, version)
    sys.exit(0)
+
+#-------------------------------------------------------------------------------------
+# The main entry point
+#-------------------------------------------------------------------------------------
+
+def main():
+ # parse the CLI options
+ action = get_action(sys.argv)
+ parser = build_option_parser(action)
+ (options, args) = parser.parse_args()
+
+ # execute the desired action
+ if 1: #try:
+ fn = globals()["execute_%s" % action]
+ fn(args, options, parser)
+ #except KeyError, e:
+ # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
+ # sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/v1/bin/ansible-playbook b/v1/bin/ansible-playbook
new file mode 100755
index 00000000000..3d6e1f9f402
--- /dev/null
+++ b/v1/bin/ansible-playbook
@@ -0,0 +1,330 @@
+#!/usr/bin/env python
+# (C) 2012, Michael DeHaan,
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#######################################################
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import sys
+import os
+import stat
+
+# Augment PYTHONPATH to find Python modules relative to this file path
+# This is so that we can find the modules when running from a local checkout
+# installed as editable with `pip install -e ...` or `python setup.py develop`
+local_module_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'lib')
+)
+sys.path.append(local_module_path)
+
+import ansible.playbook
+import ansible.constants as C
+import ansible.utils.template
+from ansible import errors
+from ansible import callbacks
+from ansible import utils
+from ansible.color import ANSIBLE_COLOR, stringc
+from ansible.callbacks import display
+
+def colorize(lead, num, color):
+ """ Print 'lead' = 'num' in 'color' """
+ if num != 0 and ANSIBLE_COLOR and color is not None:
+ return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
+ else:
+ return "%s=%-4s" % (lead, str(num))
+
+def hostcolor(host, stats, color=True):
+ if ANSIBLE_COLOR and color:
+ if stats['failures'] != 0 or stats['unreachable'] != 0:
+ return "%-37s" % stringc(host, 'red')
+ elif stats['changed'] != 0:
+ return "%-37s" % stringc(host, 'yellow')
+ else:
+ return "%-37s" % stringc(host, 'green')
+ return "%-26s" % host
+
+
+def main(args):
+ ''' run ansible-playbook operations '''
+
+ # create parser for CLI options
+ parser = utils.base_parser(
+ constants=C,
+ usage = "%prog playbook.yml",
+ connect_opts=True,
+ runas_opts=True,
+ subset_opts=True,
+ check_opts=True,
+ diff_opts=True
+ )
+ #parser.add_option('--vault-password', dest="vault_password",
+ # help="password for vault encrypted files")
+ parser.add_option('-t', '--tags', dest='tags', default='all',
+ help="only run plays and tasks tagged with these values")
+ parser.add_option('--skip-tags', dest='skip_tags',
+ help="only run plays and tasks whose tags do not match these values")
+ parser.add_option('--syntax-check', dest='syntax', action='store_true',
+ help="perform a syntax check on the playbook, but do not execute it")
+ parser.add_option('--list-tasks', dest='listtasks', action='store_true',
+ help="list all tasks that would be executed")
+ parser.add_option('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
+ parser.add_option('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+ parser.add_option('--start-at-task', dest='start_at',
+ help="start the playbook at the task matching this name")
+ parser.add_option('--force-handlers', dest='force_handlers',
+ default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
+ help="run handlers even if a task fails")
+ parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache")
+
+ options, args = parser.parse_args(args)
+
+ if len(args) == 0:
+ parser.print_help(file=sys.stderr)
+ return 1
+
+ # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
+
+ if (options.ask_vault_pass and options.vault_password_file):
+ parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+
+ sshpass = None
+ becomepass = None
+ vault_pass = None
+
+ options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
+
+ if options.listhosts or options.syntax or options.listtasks or options.listtags:
+ (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
+ else:
+ options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
+ # Never ask for an SSH password when we run with local connection
+ if options.connection == "local":
+ options.ask_pass = False
+
+ # set pe options
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
+ become_ask_pass=options.become_ask_pass,
+ ask_vault_pass=options.ask_vault_pass,
+ become_method=prompt_method)
+
+ # read vault_pass from a file
+ if not options.ask_vault_pass and options.vault_password_file:
+ vault_pass = utils.read_vault_file(options.vault_password_file)
+
+ extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
+
+ only_tags = options.tags.split(",")
+ skip_tags = options.skip_tags
+ if options.skip_tags is not None:
+ skip_tags = options.skip_tags.split(",")
+
+ for playbook in args:
+ if not os.path.exists(playbook):
+ raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
+
+ # Note: slightly wrong, this is written so that implicit localhost
+ # (which is not returned in list_hosts()) is taken into account for
+ # warning if inventory is empty. But it can't be taken into account for
+ # checking if limit doesn't match any hosts. Instead we don't worry about
+ # limit if only implicit localhost was in inventory to start with.
+ #
+ # Fix this in v2
+ no_hosts = False
+ if len(inventory.list_hosts()) == 0:
+ # Empty inventory
+ utils.warning("provided hosts list is empty, only localhost is available")
+ no_hosts = True
+ inventory.subset(options.subset)
+ if len(inventory.list_hosts()) == 0 and no_hosts is False:
+ # Invalid limit
+ raise errors.AnsibleError("Specified --limit does not match any hosts")
+
+ # run all playbooks specified on the command line
+ for playbook in args:
+
+ stats = callbacks.AggregateStats()
+ playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
+ if options.step:
+ playbook_cb.step = options.step
+ if options.start_at:
+ playbook_cb.start_at = options.start_at
+ runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
+
+ pb = ansible.playbook.PlayBook(
+ playbook=playbook,
+ module_path=options.module_path,
+ inventory=inventory,
+ forks=options.forks,
+ remote_user=options.remote_user,
+ remote_pass=sshpass,
+ callbacks=playbook_cb,
+ runner_callbacks=runner_cb,
+ stats=stats,
+ timeout=options.timeout,
+ transport=options.connection,
+ become=options.become,
+ become_method=options.become_method,
+ become_user=options.become_user,
+ become_pass=becomepass,
+ extra_vars=extra_vars,
+ private_key_file=options.private_key_file,
+ only_tags=only_tags,
+ skip_tags=skip_tags,
+ check=options.check,
+ diff=options.diff,
+ vault_password=vault_pass,
+ force_handlers=options.force_handlers,
+ )
+
+ if options.flush_cache:
+ display(callbacks.banner("FLUSHING FACT CACHE"))
+ pb.SETUP_CACHE.flush()
+
+ if options.listhosts or options.listtasks or options.syntax or options.listtags:
+ print ''
+ print 'playbook: %s' % playbook
+ print ''
+ playnum = 0
+ for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
+ playnum += 1
+ play = ansible.playbook.Play(pb, play_ds, play_basedir,
+ vault_password=pb.vault_password)
+ label = play.name
+ hosts = pb.inventory.list_hosts(play.hosts)
+
+ if options.listhosts:
+ print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
+ for host in hosts:
+ print ' %s' % host
+
+ if options.listtags or options.listtasks:
+ print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
+
+ if options.listtags:
+ tags = []
+ for task in pb.tasks_to_run_in_play(play):
+ tags.extend(task.tags)
+ print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
+
+ if options.listtasks:
+
+ for task in pb.tasks_to_run_in_play(play):
+ if getattr(task, 'name', None) is not None:
+ # meta tasks have no names
+ print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
+
+ if options.listhosts or options.listtasks or options.listtags:
+ print ''
+ continue
+
+ if options.syntax:
+ # if we've not exited by now then we are fine.
+ print 'Playbook Syntax is fine'
+ return 0
+
+ failed_hosts = []
+ unreachable_hosts = []
+
+ try:
+
+ pb.run()
+
+ hosts = sorted(pb.stats.processed.keys())
+ display(callbacks.banner("PLAY RECAP"))
+ playbook_cb.on_stats(pb.stats)
+
+ for h in hosts:
+ t = pb.stats.summarize(h)
+ if t['failures'] > 0:
+ failed_hosts.append(h)
+ if t['unreachable'] > 0:
+ unreachable_hosts.append(h)
+
+ retries = failed_hosts + unreachable_hosts
+
+ if C.RETRY_FILES_ENABLED and len(retries) > 0:
+ filename = pb.generate_retry_inventory(retries)
+ if filename:
+ display(" to retry, use: --limit @%s\n" % filename)
+
+ for h in hosts:
+ t = pb.stats.summarize(h)
+
+ display("%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize('ok', t['ok'], 'green'),
+ colorize('changed', t['changed'], 'yellow'),
+ colorize('unreachable', t['unreachable'], 'red'),
+ colorize('failed', t['failures'], 'red')),
+ screen_only=True
+ )
+
+ display("%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize('ok', t['ok'], None),
+ colorize('changed', t['changed'], None),
+ colorize('unreachable', t['unreachable'], None),
+ colorize('failed', t['failures'], None)),
+ log_only=True
+ )
+
+
+ print ""
+ if len(failed_hosts) > 0:
+ return 2
+ if len(unreachable_hosts) > 0:
+ return 3
+
+ except errors.AnsibleError, e:
+ display("ERROR: %s" % e, color='red')
+ return 1
+
+ return 0
+
+
+if __name__ == "__main__":
+ display(" ", log_only=True)
+ display(" ".join(sys.argv), log_only=True)
+ display(" ", log_only=True)
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except errors.AnsibleError, e:
+ display("ERROR: %s" % e, color='red', stderr=True)
+ sys.exit(1)
+ except KeyboardInterrupt, ke:
+ display("ERROR: interrupted", color='red', stderr=True)
+ sys.exit(1)
diff --git a/v1/bin/ansible-pull b/v1/bin/ansible-pull
new file mode 100755
index 00000000000..d4887631e0f
--- /dev/null
+++ b/v1/bin/ansible-pull
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+
+# (c) 2012, Stephen Fromm
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-pull is a script that runs ansible in local mode
+# after checking out a playbooks directory from source repo. There is an
+# example playbook to bootstrap this script in the examples/ dir which
+# installs ansible and sets it up to run on cron.
+
+# usage:
+# ansible-pull -d /var/lib/ansible \
+# -U http://example.net/content.git [-C production] \
+# [path/playbook.yml]
+#
+# the -d and -U arguments are required; the -C argument is optional.
+#
+# ansible-pull accepts an optional argument to specify a playbook
+# location underneath the workdir and then searches the source repo
+# for playbooks in the following order, stopping at the first match:
+#
+# 1. $workdir/path/playbook.yml, if specified
+# 2. $workdir/$fqdn.yml
+# 3. $workdir/$hostname.yml
+# 4. $workdir/local.yml
+#
+# the source repo must contain at least one of these playbooks.
+
+import os
+import shutil
+import sys
+import datetime
+import socket
+import random
+import time
+from ansible import utils
+from ansible.utils import cmd_functions
+from ansible import errors
+from ansible import inventory
+
+DEFAULT_REPO_TYPE = 'git'
+DEFAULT_PLAYBOOK = 'local.yml'
+PLAYBOOK_ERRORS = {1: 'File does not exist',
+ 2: 'File is not readable'}
+
+VERBOSITY=0
+
+def increment_debug(option, opt, value, parser):
+ global VERBOSITY
+ VERBOSITY += 1
+
+def try_playbook(path):
+ if not os.path.exists(path):
+ return 1
+ if not os.access(path, os.R_OK):
+ return 2
+ return 0
+
+
+def select_playbook(path, args):
+ playbook = None
+ if len(args) > 0 and args[0] is not None:
+ playbook = "%s/%s" % (path, args[0])
+ rc = try_playbook(playbook)
+ if rc != 0:
+ print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
+ return None
+ return playbook
+ else:
+ fqdn = socket.getfqdn()
+ hostpb = "%s/%s.yml" % (path, fqdn)
+ shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
+ localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
+ errors = []
+ for pb in [hostpb, shorthostpb, localpb]:
+ rc = try_playbook(pb)
+ if rc == 0:
+ playbook = pb
+ break
+ else:
+ errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
+ if playbook is None:
+ print >>sys.stderr, "\n".join(errors)
+ return playbook
+
+
+def main(args):
+ """ Set up and run a local playbook """
+ usage = "%prog [options] [playbook.yml]"
+ parser = utils.SortedOptParser(usage=usage)
+ parser.add_option('--purge', default=False, action='store_true',
+ help='purge checkout after playbook run')
+ parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
+ help='only run the playbook if the repository has been updated')
+ parser.add_option('-s', '--sleep', dest='sleep', default=None,
+ help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests')
+ parser.add_option('-f', '--force', dest='force', default=False,
+ action='store_true',
+ help='run the playbook even if the repository could '
+ 'not be updated')
+ parser.add_option('-d', '--directory', dest='dest', default=None,
+ help='directory to checkout repository to')
+ #parser.add_option('-l', '--live', default=True, action='store_live',
+ # help='Print the ansible-playbook output while running')
+ parser.add_option('-U', '--url', dest='url', default=None,
+ help='URL of the playbook repository')
+ parser.add_option('-C', '--checkout', dest='checkout',
+ help='branch/tag/commit to checkout. '
+ 'Defaults to behavior of repository module.')
+ parser.add_option('-i', '--inventory-file', dest='inventory',
+ help="location of the inventory host file")
+ parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
+ help="set additional variables as key=value or YAML/JSON", default=[])
+ parser.add_option('-v', '--verbose', default=False, action="callback",
+ callback=increment_debug,
+ help='Pass -vvvv to ansible-playbook')
+ parser.add_option('-m', '--module-name', dest='module_name',
+ default=DEFAULT_REPO_TYPE,
+ help='Module name used to check out repository. '
+ 'Default is %s.' % DEFAULT_REPO_TYPE)
+ parser.add_option('--vault-password-file', dest='vault_password_file',
+ help="vault password file")
+ parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password')
+ parser.add_option('-t', '--tags', dest='tags', default=False,
+ help='only run plays and tasks tagged with these values')
+ parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
+ help='adds the hostkey for the repo url if not already added')
+ parser.add_option('--key-file', dest='key_file',
+ help="Pass '-i ' to the SSH arguments used by git.")
+ options, args = parser.parse_args(args)
+
+ hostname = socket.getfqdn()
+ if not options.dest:
+ # use a hostname dependent directory, in case of $HOME on nfs
+ options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
+
+ options.dest = os.path.abspath(options.dest)
+
+ if not options.url:
+ parser.error("URL for repository not specified, use -h for help")
+ return 1
+
+ now = datetime.datetime.now()
+ print now.strftime("Starting ansible-pull at %F %T")
+
+ # Attempt to use the inventory passed in as an argument
+ # It might not yet have been downloaded so use localhost if not
+ if not options.inventory or not os.path.exists(options.inventory):
+ inv_opts = 'localhost,'
+ else:
+ inv_opts = options.inventory
+ limit_opts = 'localhost:%s:127.0.0.1' % hostname
+ repo_opts = "name=%s dest=%s" % (options.url, options.dest)
+
+ if VERBOSITY == 0:
+ base_opts = '-c local --limit "%s"' % limit_opts
+ elif VERBOSITY > 0:
+ debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
+ base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
+
+ if options.checkout:
+ repo_opts += ' version=%s' % options.checkout
+
+ # Only git module is supported
+ if options.module_name == DEFAULT_REPO_TYPE:
+ if options.accept_host_key:
+ repo_opts += ' accept_hostkey=yes'
+
+ if options.key_file:
+ repo_opts += ' key_file=%s' % options.key_file
+
+ path = utils.plugins.module_finder.find_plugin(options.module_name)
+ if path is None:
+ sys.stderr.write("module '%s' not found.\n" % options.module_name)
+ return 1
+
+ bin_path = os.path.dirname(os.path.abspath(__file__))
+ cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
+ bin_path, inv_opts, base_opts, options.module_name, repo_opts
+ )
+
+ for ev in options.extra_vars:
+ cmd += ' -e "%s"' % ev
+
+ if options.sleep:
+ try:
+ secs = random.randint(0,int(options.sleep));
+ except ValueError:
+ parser.error("%s is not a number." % options.sleep)
+ return 1
+
+ print >>sys.stderr, "Sleeping for %d seconds..." % secs
+ time.sleep(secs);
+
+
+ # RUN THE CHECKOUT COMMAND
+ rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+
+ if rc != 0:
+ if options.force:
+ print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
+ else:
+ return rc
+ elif options.ifchanged and '"changed": true' not in out:
+ print "Repository has not changed, quitting."
+ return 0
+
+ playbook = select_playbook(options.dest, args)
+
+ if playbook is None:
+ print >>sys.stderr, "Could not find a playbook to run."
+ return 1
+
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
+ if options.vault_password_file:
+ cmd += " --vault-password-file=%s" % options.vault_password_file
+ if options.inventory:
+ cmd += ' -i "%s"' % options.inventory
+ for ev in options.extra_vars:
+ cmd += ' -e "%s"' % ev
+ if options.ask_sudo_pass:
+ cmd += ' -K'
+ if options.tags:
+ cmd += ' -t "%s"' % options.tags
+ os.chdir(options.dest)
+
+ # RUN THE PLAYBOOK COMMAND
+ rc, out, err = cmd_functions.run_cmd(cmd, live=True)
+
+ if options.purge:
+ os.chdir('/')
+ try:
+ shutil.rmtree(options.dest)
+ except Exception, e:
+ print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
+
+ return rc
+
+if __name__ == '__main__':
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except KeyboardInterrupt, e:
+ print >>sys.stderr, "Exit on user request.\n"
+ sys.exit(1)
diff --git a/v1/bin/ansible-vault b/v1/bin/ansible-vault
new file mode 100755
index 00000000000..22cfc0e1487
--- /dev/null
+++ b/v1/bin/ansible-vault
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+
+# (c) 2014, James Tanner
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# http://docs.ansible.com/playbooks_vault.html for more details.
+
+__requires__ = ['ansible']
+try:
+ import pkg_resources
+except Exception:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. But we
+ # have code that better expresses the errors in the places where the code
+ # is actually used (the deps are optional for many code paths) so we don't
+ # want to fail here.
+ pass
+
+import os
+import sys
+import traceback
+
+import ansible.constants as C
+
+from ansible import utils
+from ansible import errors
+from ansible.utils.vault import VaultEditor
+
+from optparse import OptionParser
+
+#-------------------------------------------------------------------------------------
+# Utility functions for parsing actions/options
+#-------------------------------------------------------------------------------------
+
+VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
+
+def build_option_parser(action):
+ """
+ Builds an option parser object based on the action
+ the user wants to execute.
+ """
+
+ usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
+ epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+ OptionParser.format_epilog = lambda self, formatter: self.epilog
+ parser = OptionParser(usage=usage, epilog=epilog)
+
+ if not action:
+ parser.print_help()
+ sys.exit()
+
+ # options for all actions
+ #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
+ parser.add_option('--debug', dest='debug', action="store_true", help="debug")
+ parser.add_option('--vault-password-file', dest='password_file',
+ help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
+
+ # options specific to actions
+ if action == "create":
+ parser.set_usage("usage: %prog create [options] file_name")
+ elif action == "decrypt":
+ parser.set_usage("usage: %prog decrypt [options] file_name")
+ elif action == "edit":
+ parser.set_usage("usage: %prog edit [options] file_name")
+ elif action == "view":
+ parser.set_usage("usage: %prog view [options] file_name")
+ elif action == "encrypt":
+ parser.set_usage("usage: %prog encrypt [options] file_name")
+ elif action == "rekey":
+ parser.set_usage("usage: %prog rekey [options] file_name")
+
+ # done, return the parser
+ return parser
+
+def get_action(args):
+ """
+ Get the action the user wants to execute from the
+ sys argv list.
+ """
+ for i in range(0,len(args)):
+ arg = args[i]
+ if arg in VALID_ACTIONS:
+ del args[i]
+ return arg
+ return None
+
+def get_opt(options, k, defval=""):
+ """
+ Returns an option from an Optparse values instance.
+ """
+ try:
+ data = getattr(options, k)
+ except:
+ return defval
+ if k == "roles_path":
+ if os.pathsep in data:
+ data = data.split(os.pathsep)[0]
+ return data
+
+#-------------------------------------------------------------------------------------
+# Command functions
+#-------------------------------------------------------------------------------------
+
+def execute_create(args, options, parser):
+ if len(args) > 1:
+ raise errors.AnsibleError("'create' does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ this_editor = VaultEditor(cipher, password, args[0])
+ this_editor.create_file()
+
+def execute_decrypt(args, options, parser):
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.decrypt_file()
+
+ print "Decryption successful"
+
+def execute_edit(args, options, parser):
+
+ if len(args) > 1:
+ raise errors.AnsibleError("edit does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = None
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.edit_file()
+
+def execute_view(args, options, parser):
+
+ if len(args) > 1:
+ raise errors.AnsibleError("view does not accept more than one filename")
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = None
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.view_file()
+
+def execute_encrypt(args, options, parser):
+
+ if not options.password_file:
+ password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ cipher = 'AES256'
+ if hasattr(options, 'cipher'):
+ cipher = options.cipher
+
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.encrypt_file()
+
+ print "Encryption successful"
+
+def execute_rekey(args, options, parser):
+
+ if not options.password_file:
+ password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
+ else:
+ password = utils.read_vault_file(options.password_file)
+
+ __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
+
+ cipher = None
+ for f in args:
+ this_editor = VaultEditor(cipher, password, f)
+ this_editor.rekey_file(new_password)
+
+ print "Rekey successful"
+
+#-------------------------------------------------------------------------------------
+# MAIN
+#-------------------------------------------------------------------------------------
+
+def main():
+
+ action = get_action(sys.argv)
+ parser = build_option_parser(action)
+ (options, args) = parser.parse_args()
+
+ if not len(args):
+ raise errors.AnsibleError(
+ "The '%s' command requires a filename as the first argument" % action
+ )
+
+ # execute the desired action
+ try:
+ fn = globals()["execute_%s" % action]
+ fn(args, options, parser)
+ except Exception, err:
+ if options.debug:
+ print traceback.format_exc()
+ print "ERROR:",err
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/test/units/README.md b/v1/tests/README.md
similarity index 100%
rename from test/units/README.md
rename to v1/tests/README.md
diff --git a/test/units/TestConstants.py b/v1/tests/TestConstants.py
similarity index 100%
rename from test/units/TestConstants.py
rename to v1/tests/TestConstants.py
diff --git a/test/units/TestFilters.py b/v1/tests/TestFilters.py
similarity index 100%
rename from test/units/TestFilters.py
rename to v1/tests/TestFilters.py
diff --git a/test/units/TestInventory.py b/v1/tests/TestInventory.py
similarity index 100%
rename from test/units/TestInventory.py
rename to v1/tests/TestInventory.py
diff --git a/test/units/TestModuleUtilsBasic.py b/v1/tests/TestModuleUtilsBasic.py
similarity index 100%
rename from test/units/TestModuleUtilsBasic.py
rename to v1/tests/TestModuleUtilsBasic.py
diff --git a/test/units/TestModuleUtilsDatabase.py b/v1/tests/TestModuleUtilsDatabase.py
similarity index 100%
rename from test/units/TestModuleUtilsDatabase.py
rename to v1/tests/TestModuleUtilsDatabase.py
diff --git a/test/units/TestModules.py b/v1/tests/TestModules.py
similarity index 100%
rename from test/units/TestModules.py
rename to v1/tests/TestModules.py
diff --git a/test/units/TestPlayVarsFiles.py b/v1/tests/TestPlayVarsFiles.py
similarity index 100%
rename from test/units/TestPlayVarsFiles.py
rename to v1/tests/TestPlayVarsFiles.py
diff --git a/test/units/TestSynchronize.py b/v1/tests/TestSynchronize.py
similarity index 100%
rename from test/units/TestSynchronize.py
rename to v1/tests/TestSynchronize.py
diff --git a/test/units/TestUtils.py b/v1/tests/TestUtils.py
similarity index 100%
rename from test/units/TestUtils.py
rename to v1/tests/TestUtils.py
diff --git a/test/units/TestUtilsStringFunctions.py b/v1/tests/TestUtilsStringFunctions.py
similarity index 100%
rename from test/units/TestUtilsStringFunctions.py
rename to v1/tests/TestUtilsStringFunctions.py
diff --git a/test/units/TestVault.py b/v1/tests/TestVault.py
similarity index 100%
rename from test/units/TestVault.py
rename to v1/tests/TestVault.py
diff --git a/test/units/TestVaultEditor.py b/v1/tests/TestVaultEditor.py
similarity index 100%
rename from test/units/TestVaultEditor.py
rename to v1/tests/TestVaultEditor.py
diff --git a/test/units/ansible.cfg b/v1/tests/ansible.cfg
similarity index 100%
rename from test/units/ansible.cfg
rename to v1/tests/ansible.cfg
diff --git a/test/units/inventory_test_data/ansible_hosts b/v1/tests/inventory_test_data/ansible_hosts
similarity index 100%
rename from test/units/inventory_test_data/ansible_hosts
rename to v1/tests/inventory_test_data/ansible_hosts
diff --git a/test/units/inventory_test_data/broken.yml b/v1/tests/inventory_test_data/broken.yml
similarity index 100%
rename from test/units/inventory_test_data/broken.yml
rename to v1/tests/inventory_test_data/broken.yml
diff --git a/test/units/inventory_test_data/common_vars.yml b/v1/tests/inventory_test_data/common_vars.yml
similarity index 100%
rename from test/units/inventory_test_data/common_vars.yml
rename to v1/tests/inventory_test_data/common_vars.yml
diff --git a/test/units/inventory_test_data/complex_hosts b/v1/tests/inventory_test_data/complex_hosts
similarity index 100%
rename from test/units/inventory_test_data/complex_hosts
rename to v1/tests/inventory_test_data/complex_hosts
diff --git a/test/units/inventory_test_data/encrypted.yml b/v1/tests/inventory_test_data/encrypted.yml
similarity index 100%
rename from test/units/inventory_test_data/encrypted.yml
rename to v1/tests/inventory_test_data/encrypted.yml
diff --git a/test/units/inventory_test_data/hosts_list.yml b/v1/tests/inventory_test_data/hosts_list.yml
similarity index 100%
rename from test/units/inventory_test_data/hosts_list.yml
rename to v1/tests/inventory_test_data/hosts_list.yml
diff --git a/test/units/inventory_test_data/inventory/test_alpha_end_before_beg b/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_alpha_end_before_beg
rename to v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
diff --git a/test/units/inventory_test_data/inventory/test_combined_range b/v1/tests/inventory_test_data/inventory/test_combined_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_combined_range
rename to v1/tests/inventory_test_data/inventory/test_combined_range
diff --git a/test/units/inventory_test_data/inventory/test_incorrect_format b/v1/tests/inventory_test_data/inventory/test_incorrect_format
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_incorrect_format
rename to v1/tests/inventory_test_data/inventory/test_incorrect_format
diff --git a/test/units/inventory_test_data/inventory/test_incorrect_range b/v1/tests/inventory_test_data/inventory/test_incorrect_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_incorrect_range
rename to v1/tests/inventory_test_data/inventory/test_incorrect_range
diff --git a/test/units/inventory_test_data/inventory/test_leading_range b/v1/tests/inventory_test_data/inventory/test_leading_range
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_leading_range
rename to v1/tests/inventory_test_data/inventory/test_leading_range
diff --git a/test/units/inventory_test_data/inventory/test_missing_end b/v1/tests/inventory_test_data/inventory/test_missing_end
similarity index 100%
rename from test/units/inventory_test_data/inventory/test_missing_end
rename to v1/tests/inventory_test_data/inventory/test_missing_end
diff --git a/test/units/inventory_test_data/inventory_api.py b/v1/tests/inventory_test_data/inventory_api.py
similarity index 100%
rename from test/units/inventory_test_data/inventory_api.py
rename to v1/tests/inventory_test_data/inventory_api.py
diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/v1/tests/inventory_test_data/inventory_dir/0hosts
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/0hosts
rename to v1/tests/inventory_test_data/inventory_dir/0hosts
diff --git a/test/units/inventory_test_data/inventory_dir/1mythology b/v1/tests/inventory_test_data/inventory_dir/1mythology
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/1mythology
rename to v1/tests/inventory_test_data/inventory_dir/1mythology
diff --git a/test/units/inventory_test_data/inventory_dir/2levels b/v1/tests/inventory_test_data/inventory_dir/2levels
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/2levels
rename to v1/tests/inventory_test_data/inventory_dir/2levels
diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/v1/tests/inventory_test_data/inventory_dir/3comments
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/3comments
rename to v1/tests/inventory_test_data/inventory_dir/3comments
diff --git a/test/units/inventory_test_data/inventory_dir/4skip_extensions.ini b/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
similarity index 100%
rename from test/units/inventory_test_data/inventory_dir/4skip_extensions.ini
rename to v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
diff --git a/test/units/inventory_test_data/large_range b/v1/tests/inventory_test_data/large_range
similarity index 100%
rename from test/units/inventory_test_data/large_range
rename to v1/tests/inventory_test_data/large_range
diff --git a/test/units/inventory_test_data/restrict_pattern b/v1/tests/inventory_test_data/restrict_pattern
similarity index 100%
rename from test/units/inventory_test_data/restrict_pattern
rename to v1/tests/inventory_test_data/restrict_pattern
diff --git a/test/units/inventory_test_data/simple_hosts b/v1/tests/inventory_test_data/simple_hosts
similarity index 100%
rename from test/units/inventory_test_data/simple_hosts
rename to v1/tests/inventory_test_data/simple_hosts
diff --git a/test/units/module_tests/TestApt.py b/v1/tests/module_tests/TestApt.py
similarity index 100%
rename from test/units/module_tests/TestApt.py
rename to v1/tests/module_tests/TestApt.py
diff --git a/test/units/module_tests/TestDocker.py b/v1/tests/module_tests/TestDocker.py
similarity index 100%
rename from test/units/module_tests/TestDocker.py
rename to v1/tests/module_tests/TestDocker.py
diff --git a/test/units/vault_test_data/foo-ansible-1.0.yml b/v1/tests/vault_test_data/foo-ansible-1.0.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.0.yml
rename to v1/tests/vault_test_data/foo-ansible-1.0.yml
diff --git a/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
rename to v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
diff --git a/test/units/vault_test_data/foo-ansible-1.1.yml b/v1/tests/vault_test_data/foo-ansible-1.1.yml
similarity index 100%
rename from test/units/vault_test_data/foo-ansible-1.1.yml
rename to v1/tests/vault_test_data/foo-ansible-1.1.yml
diff --git a/v2/README-tests.md b/v2/README-tests.md
deleted file mode 100644
index 956160b653a..00000000000
--- a/v2/README-tests.md
+++ /dev/null
@@ -1,33 +0,0 @@
-Ansible Test System
-===================
-
-Folders
-=======
-
-test
-----
-
-Unit tests that test small pieces of code not suited for the integration test
-layer, usually very API based, and should leverage mock interfaces rather than
-producing side effects.
-
-Playbook engine code is better suited for integration tests.
-
-Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib unittest2 mock
-
-integration
------------
-
-Integration test layer, constructed using playbooks.
-
-Some tests may require cloud credentials, others will not, and destructive
-tests are separated from non-destructive so a subset can be run on development
-machines.
-
-learn more
-----------
-
-hop into a subdirectory and see the associated README.md for more info.
-
-
-
diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py
deleted file mode 100644
index 8637adb54d6..00000000000
--- a/v2/ansible/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-__version__ = '2.0'
diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py
deleted file mode 100644
index 29d6afd9912..00000000000
--- a/v2/ansible/inventory/host.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible import constants as C
-from ansible.inventory.group import Group
-from ansible.utils.vars import combine_vars
-
-__all__ = ['Host']
-
-class Host:
- ''' a single ansible host '''
-
- #__slots__ = [ 'name', 'vars', 'groups' ]
-
- def __getstate__(self):
- return self.serialize()
-
- def __setstate__(self, data):
- return self.deserialize(data)
-
- def __eq__(self, other):
- return self.name == other.name
-
- def serialize(self):
- groups = []
- for group in self.groups:
- groups.append(group.serialize())
-
- return dict(
- name=self.name,
- vars=self.vars.copy(),
- ipv4_address=self.ipv4_address,
- ipv6_address=self.ipv6_address,
- port=self.port,
- gathered_facts=self._gathered_facts,
- groups=groups,
- )
-
- def deserialize(self, data):
- self.__init__()
-
- self.name = data.get('name')
- self.vars = data.get('vars', dict())
- self.ipv4_address = data.get('ipv4_address', '')
- self.ipv6_address = data.get('ipv6_address', '')
- self.port = data.get('port')
-
- groups = data.get('groups', [])
- for group_data in groups:
- g = Group()
- g.deserialize(group_data)
- self.groups.append(g)
-
- def __init__(self, name=None, port=None):
-
- self.name = name
- self.vars = {}
- self.groups = []
-
- self.ipv4_address = name
- self.ipv6_address = name
-
- if port and port != C.DEFAULT_REMOTE_PORT:
- self.port = int(port)
- else:
- self.port = C.DEFAULT_REMOTE_PORT
-
- self._gathered_facts = False
-
- def __repr__(self):
- return self.get_name()
-
- def get_name(self):
- return self.name
-
- @property
- def gathered_facts(self):
- return self._gathered_facts
-
- def set_gathered_facts(self, gathered):
- self._gathered_facts = gathered
-
- def add_group(self, group):
-
- self.groups.append(group)
-
- def set_variable(self, key, value):
-
- self.vars[key]=value
-
- def get_groups(self):
-
- groups = {}
- for g in self.groups:
- groups[g.name] = g
- ancestors = g.get_ancestors()
- for a in ancestors:
- groups[a.name] = a
- return groups.values()
-
- def get_vars(self):
-
- results = {}
- groups = self.get_groups()
- for group in sorted(groups, key=lambda g: g.depth):
- results = combine_vars(results, group.get_vars())
- results = combine_vars(results, self.vars)
- results['inventory_hostname'] = self.name
- results['inventory_hostname_short'] = self.name.split('.')[0]
- results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
- return results
-
diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core
deleted file mode 160000
index 0341ddd35ed..00000000000
--- a/v2/ansible/modules/core
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259
diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras
deleted file mode 160000
index dd80fa221ce..00000000000
--- a/v2/ansible/modules/extras
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc
diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py
deleted file mode 100644
index 40e6638f239..00000000000
--- a/v2/ansible/playbook/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.parsing import DataLoader
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.play import Play
-from ansible.playbook.playbook_include import PlaybookInclude
-from ansible.plugins import push_basedir
-
-
-__all__ = ['Playbook']
-
-
-class Playbook:
-
- def __init__(self, loader):
- # Entries in the datastructure of a playbook may
- # be either a play or an include statement
- self._entries = []
- self._basedir = os.getcwd()
- self._loader = loader
-
- @staticmethod
- def load(file_name, variable_manager=None, loader=None):
- pb = Playbook(loader=loader)
- pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
- return pb
-
- def _load_playbook_data(self, file_name, variable_manager):
-
- if os.path.isabs(file_name):
- self._basedir = os.path.dirname(file_name)
- else:
- self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
-
- # set the loaders basedir
- self._loader.set_basedir(self._basedir)
-
- # also add the basedir to the list of module directories
- push_basedir(self._basedir)
-
- ds = self._loader.load_from_file(os.path.basename(file_name))
- if not isinstance(ds, list):
- raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
-
- # Parse the playbook entries. For plays, we simply parse them
- # using the Play() object, and includes are parsed using the
- # PlaybookInclude() object
- for entry in ds:
- if not isinstance(entry, dict):
- raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
-
- if 'include' in entry:
- pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
- self._entries.extend(pb._entries)
- else:
- entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
- self._entries.append(entry_obj)
-
- def get_loader(self):
- return self._loader
-
- def get_plays(self):
- return self._entries[:]
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
deleted file mode 100644
index b99c01fdf74..00000000000
--- a/v2/ansible/playbook/play.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError, AnsibleParserError
-
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.base import Base
-from ansible.playbook.become import Become
-from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
-from ansible.playbook.role import Role
-from ansible.playbook.taggable import Taggable
-from ansible.playbook.block import Block
-
-from ansible.utils.vars import combine_vars
-
-
-__all__ = ['Play']
-
-
-class Play(Base, Taggable, Become):
-
- """
- A play is a language feature that represents a list of roles and/or
- task/handler blocks to execute on a given set of hosts.
-
- Usage:
-
- Play.load(datastructure) -> Play
- Play.something(...)
- """
-
- # =================================================================================
- # Connection-Related Attributes
-
- # TODO: generalize connection
- _accelerate = FieldAttribute(isa='bool', default=False)
- _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
- _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
-
- # Connection
- _gather_facts = FieldAttribute(isa='string', default='smart')
- _hosts = FieldAttribute(isa='list', default=[], required=True)
- _name = FieldAttribute(isa='string', default='')
-
- # Variable Attributes
- _vars_files = FieldAttribute(isa='list', default=[])
- _vars_prompt = FieldAttribute(isa='dict', default=dict())
- _vault_password = FieldAttribute(isa='string')
-
- # Block (Task) Lists Attributes
- _handlers = FieldAttribute(isa='list', default=[])
- _pre_tasks = FieldAttribute(isa='list', default=[])
- _post_tasks = FieldAttribute(isa='list', default=[])
- _tasks = FieldAttribute(isa='list', default=[])
-
- # Role Attributes
- _roles = FieldAttribute(isa='list', default=[])
-
- # Flag/Setting Attributes
- _any_errors_fatal = FieldAttribute(isa='bool', default=False)
- _max_fail_percentage = FieldAttribute(isa='string', default='0')
- _serial = FieldAttribute(isa='int', default=0)
- _strategy = FieldAttribute(isa='string', default='linear')
-
- # =================================================================================
-
- def __init__(self):
- super(Play, self).__init__()
-
- def __repr__(self):
- return self.get_name()
-
- def get_name(self):
- ''' return the name of the Play '''
- return "PLAY: %s" % self._attributes.get('name')
-
- @staticmethod
- def load(data, variable_manager=None, loader=None):
- p = Play()
- return p.load_data(data, variable_manager=variable_manager, loader=loader)
-
- def preprocess_data(self, ds):
- '''
- Adjusts play datastructure to cleanup old/legacy items
- '''
-
- assert isinstance(ds, dict)
-
- # The use of 'user' in the Play datastructure was deprecated to
- # line up with the same change for Tasks, due to the fact that
- # 'user' conflicted with the user module.
- if 'user' in ds:
- # this should never happen, but error out with a helpful message
- # to the user if it does...
- if 'remote_user' in ds:
- raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
-
- ds['remote_user'] = ds['user']
- del ds['user']
-
- return super(Play, self).preprocess_data(ds)
-
- def _load_vars(self, attr, ds):
- '''
- Vars in a play can be specified either as a dictionary directly, or
- as a list of dictionaries. If the later, this method will turn the
- list into a single dictionary.
- '''
-
- try:
- if isinstance(ds, dict):
- return ds
- elif isinstance(ds, list):
- all_vars = dict()
- for item in ds:
- if not isinstance(item, dict):
- raise ValueError
- all_vars = combine_vars(all_vars, item)
- return all_vars
- else:
- raise ValueError
- except ValueError:
- raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
-
- def _load_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_pre_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_post_tasks(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed tasks/blocks.
- Bare tasks outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_handlers(self, attr, ds):
- '''
- Loads a list of blocks from a list which may be mixed handlers/blocks.
- Bare handlers outside of a block are given an implicit block.
- '''
- return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
-
- def _load_roles(self, attr, ds):
- '''
- Loads and returns a list of RoleInclude objects from the datastructure
- list of role definitions and creates the Role from those objects
- '''
-
- role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)
-
- roles = []
- for ri in role_includes:
- roles.append(Role.load(ri))
- return roles
-
- # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
-
- def _compile_roles(self):
- '''
- Handles the role compilation step, returning a flat list of tasks
- with the lowest level dependencies first. For example, if a role R
- has a dependency D1, which also has a dependency D2, the tasks from
- D2 are merged first, followed by D1, and lastly by the tasks from
- the parent role R last. This is done for all roles in the Play.
- '''
-
- block_list = []
-
- if len(self.roles) > 0:
- for r in self.roles:
- block_list.extend(r.compile(play=self))
-
- return block_list
-
- def compile(self):
- '''
- Compiles and returns the task list for this play, compiled from the
- roles (which are themselves compiled recursively) and/or the list of
- tasks specified in the play.
- '''
-
- block_list = []
-
- block_list.extend(self.pre_tasks)
- block_list.extend(self._compile_roles())
- block_list.extend(self.tasks)
- block_list.extend(self.post_tasks)
-
- return block_list
-
- def get_vars(self):
- return self.vars.copy()
-
- def get_vars_files(self):
- return self.vars_files
-
- def get_handlers(self):
- return self.handlers[:]
-
- def get_roles(self):
- return self.roles[:]
-
- def get_tasks(self):
- tasklist = []
- for task in self.pre_tasks + self.tasks + self.post_tasks:
- if isinstance(task, Block):
- tasklist.append(task.block + task.rescue + task.always)
- else:
- tasklist.append(task)
- return tasklist
-
- def serialize(self):
- data = super(Play, self).serialize()
-
- roles = []
- for role in self.get_roles():
- roles.append(role.serialize())
- data['roles'] = roles
-
- return data
-
- def deserialize(self, data):
- super(Play, self).deserialize(data)
-
- if 'roles' in data:
- role_data = data.get('roles', [])
- roles = []
- for role in role_data:
- r = Role()
- r.deserialize(role)
- roles.append(r)
-
- setattr(self, 'roles', roles)
- del data['roles']
-
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
deleted file mode 100644
index 06060257985..00000000000
--- a/v2/ansible/playbook/task.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError
-
-from ansible.parsing.mod_args import ModuleArgsParser
-from ansible.parsing.splitter import parse_kv
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
-
-from ansible.plugins import module_loader, lookup_loader
-
-from ansible.playbook.attribute import Attribute, FieldAttribute
-from ansible.playbook.base import Base
-from ansible.playbook.become import Become
-from ansible.playbook.block import Block
-from ansible.playbook.conditional import Conditional
-from ansible.playbook.role import Role
-from ansible.playbook.taggable import Taggable
-
-__all__ = ['Task']
-
-class Task(Base, Conditional, Taggable, Become):
-
- """
- A task is a language feature that represents a call to a module, with given arguments and other parameters.
- A handler is a subclass of a task.
-
- Usage:
-
- Task.load(datastructure) -> Task
- Task.something(...)
- """
-
- # =================================================================================
- # ATTRIBUTES
- # load_ and
- # validate_
- # will be used if defined
- # might be possible to define others
-
- _args = FieldAttribute(isa='dict', default=dict())
- _action = FieldAttribute(isa='string')
-
- _always_run = FieldAttribute(isa='bool')
- _any_errors_fatal = FieldAttribute(isa='bool')
- _async = FieldAttribute(isa='int', default=0)
- _changed_when = FieldAttribute(isa='string')
- _delay = FieldAttribute(isa='int', default=5)
- _delegate_to = FieldAttribute(isa='string')
- _failed_when = FieldAttribute(isa='string')
- _first_available_file = FieldAttribute(isa='list')
- _ignore_errors = FieldAttribute(isa='bool')
-
- _loop = FieldAttribute(isa='string', private=True)
- _loop_args = FieldAttribute(isa='list', private=True)
- _local_action = FieldAttribute(isa='string')
-
- # FIXME: this should not be a Task
- _meta = FieldAttribute(isa='string')
-
- _name = FieldAttribute(isa='string', default='')
-
- _notify = FieldAttribute(isa='list')
- _poll = FieldAttribute(isa='int')
- _register = FieldAttribute(isa='string')
- _retries = FieldAttribute(isa='int', default=1)
- _run_once = FieldAttribute(isa='bool')
- _until = FieldAttribute(isa='list') # ?
-
- def __init__(self, block=None, role=None, task_include=None):
- ''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
-
- self._block = block
- self._role = role
- self._task_include = task_include
-
- super(Task, self).__init__()
-
- def get_name(self):
- ''' return the name of the task '''
-
- if self._role and self.name:
- return "%s : %s" % (self._role.get_name(), self.name)
- elif self.name:
- return self.name
- else:
- flattened_args = self._merge_kv(self.args)
- if self._role:
- return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
- else:
- return "%s %s" % (self.action, flattened_args)
-
- def _merge_kv(self, ds):
- if ds is None:
- return ""
- elif isinstance(ds, basestring):
- return ds
- elif isinstance(ds, dict):
- buf = ""
- for (k,v) in ds.iteritems():
- if k.startswith('_'):
- continue
- buf = buf + "%s=%s " % (k,v)
- buf = buf.strip()
- return buf
-
- @staticmethod
- def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
- t = Task(block=block, role=role, task_include=task_include)
- return t.load_data(data, variable_manager=variable_manager, loader=loader)
-
- def __repr__(self):
- ''' returns a human readable representation of the task '''
- return "TASK: %s" % self.get_name()
-
- def _preprocess_loop(self, ds, new_ds, k, v):
- ''' take a lookup plugin name and store it correctly '''
-
- loop_name = k.replace("with_", "")
- if new_ds.get('loop') is not None:
- raise AnsibleError("duplicate loop in task: %s" % loop_name)
- new_ds['loop'] = loop_name
- new_ds['loop_args'] = v
-
- def preprocess_data(self, ds):
- '''
- tasks are especially complex arguments so need pre-processing.
- keep it short.
- '''
-
- assert isinstance(ds, dict)
-
- # the new, cleaned datastructure, which will have legacy
- # items reduced to a standard structure suitable for the
- # attributes of the task class
- new_ds = AnsibleMapping()
- if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.ansible_pos = ds.ansible_pos
-
- # use the args parsing class to determine the action, args,
- # and the delegate_to value from the various possible forms
- # supported as legacy
- args_parser = ModuleArgsParser(task_ds=ds)
- (action, args, delegate_to) = args_parser.parse()
-
- new_ds['action'] = action
- new_ds['args'] = args
- new_ds['delegate_to'] = delegate_to
-
- for (k,v) in ds.iteritems():
- if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
- # we don't want to re-assign these values, which were
- # determined by the ModuleArgsParser() above
- continue
- elif k.replace("with_", "") in lookup_loader:
- self._preprocess_loop(ds, new_ds, k, v)
- else:
- new_ds[k] = v
-
- return super(Task, self).preprocess_data(new_ds)
-
- def post_validate(self, templar):
- '''
- Override of base class post_validate, to also do final validation on
- the block and task include (if any) to which this task belongs.
- '''
-
- if self._block:
- self._block.post_validate(templar)
- if self._task_include:
- self._task_include.post_validate(templar)
-
- super(Task, self).post_validate(templar)
-
- def get_vars(self):
- all_vars = self.vars.copy()
- if self._block:
- all_vars.update(self._block.get_vars())
- if self._task_include:
- all_vars.update(self._task_include.get_vars())
-
- all_vars.update(self.serialize())
-
- if 'tags' in all_vars:
- del all_vars['tags']
- if 'when' in all_vars:
- del all_vars['when']
- return all_vars
-
- def copy(self, exclude_block=False):
- new_me = super(Task, self).copy()
-
- new_me._block = None
- if self._block and not exclude_block:
- new_me._block = self._block.copy()
-
- new_me._role = None
- if self._role:
- new_me._role = self._role
-
- new_me._task_include = None
- if self._task_include:
- new_me._task_include = self._task_include.copy()
-
- return new_me
-
- def serialize(self):
- data = super(Task, self).serialize()
-
- if self._block:
- data['block'] = self._block.serialize()
-
- if self._role:
- data['role'] = self._role.serialize()
-
- if self._task_include:
- data['task_include'] = self._task_include.serialize()
-
- return data
-
- def deserialize(self, data):
-
- # import is here to avoid import loops
- #from ansible.playbook.task_include import TaskInclude
-
- block_data = data.get('block')
-
- if block_data:
- b = Block()
- b.deserialize(block_data)
- self._block = b
- del data['block']
-
- role_data = data.get('role')
- if role_data:
- r = Role()
- r.deserialize(role_data)
- self._role = r
- del data['role']
-
- ti_data = data.get('task_include')
- if ti_data:
- #ti = TaskInclude()
- ti = Task()
- ti.deserialize(ti_data)
- self._task_include = ti
- del data['task_include']
-
- super(Task, self).deserialize(data)
-
- def evaluate_conditional(self, all_vars):
- if self._block is not None:
- if not self._block.evaluate_conditional(all_vars):
- return False
- if self._task_include is not None:
- if not self._task_include.evaluate_conditional(all_vars):
- return False
- return super(Task, self).evaluate_conditional(all_vars)
-
- def set_loader(self, loader):
- '''
- Sets the loader on this object and recursively on parent, child objects.
- This is used primarily after the Task has been serialized/deserialized, which
- does not preserve the loader.
- '''
-
- self._loader = loader
-
- if self._block:
- self._block.set_loader(loader)
- if self._task_include:
- self._task_include.set_loader(loader)
-
- def _get_parent_attribute(self, attr, extend=False):
- '''
- Generic logic to get the attribute or parent attribute for a task value.
- '''
- value = self._attributes[attr]
- if self._block and (not value or extend):
- parent_value = getattr(self._block, attr)
- if extend:
- value = self._extend_value(value, parent_value)
- else:
- value = parent_value
- if self._task_include and (not value or extend):
- parent_value = getattr(self._task_include, attr)
- if extend:
- value = self._extend_value(value, parent_value)
- else:
- value = parent_value
- return value
-
diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py
deleted file mode 100644
index 5c704afac59..00000000000
--- a/v2/ansible/utils/vault.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import subprocess
-
-from ansible import constants as C
-from ansible.errors import AnsibleError
-from ansible.utils.path import is_executable
-
-def read_vault_file(vault_password_file):
- """
- Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
-
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if not os.path.exists(this_path):
- raise AnsibleError("The vault password file %s was not found" % this_path)
-
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError as e:
- raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError) as e:
- raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
-
- return vault_pass
-
diff --git a/v2/bin/ansible b/v2/bin/ansible
deleted file mode 100755
index 467dd505a2e..00000000000
--- a/v2/bin/ansible
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-########################################################
-from __future__ import (absolute_import)
-__metaclass__ = type
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-
-from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.utils.display import Display
-
-########################################################
-
-if __name__ == '__main__':
-
- cli = None
- display = Display()
- me = os.path.basename(__file__)
-
- try:
- if me == 'ansible-playbook':
- from ansible.cli.playbook import PlaybookCLI as mycli
- elif me == 'ansible':
- from ansible.cli.adhoc import AdHocCLI as mycli
- elif me == 'ansible-pull':
- from ansible.cli.pull import PullCLI as mycli
- elif me == 'ansible-doc':
- from ansible.cli.doc import DocCLI as mycli
- elif me == 'ansible-vault':
- from ansible.cli.vault import VaultCLI as mycli
- elif me == 'ansible-galaxy':
- from ansible.cli.galaxy import GalaxyCLI as mycli
-
- cli = mycli(sys.argv, display=display)
- if cli:
- cli.parse()
- sys.exit(cli.run())
- else:
- raise AnsibleError("Program not implemented: %s" % me)
-
- except AnsibleOptionsError as e:
- cli.parser.print_help()
- display.display(str(e), stderr=True, color='red')
- sys.exit(1)
- except AnsibleError as e:
- display.display(str(e), stderr=True, color='red')
- sys.exit(2)
- except KeyboardInterrupt:
- display.error("interrupted")
- sys.exit(4)
diff --git a/v2/bin/ansible-doc b/v2/bin/ansible-doc
deleted file mode 120000
index cabb1f519aa..00000000000
--- a/v2/bin/ansible-doc
+++ /dev/null
@@ -1 +0,0 @@
-ansible
\ No newline at end of file
diff --git a/v2/bin/ansible-galaxy b/v2/bin/ansible-galaxy
deleted file mode 120000
index cabb1f519aa..00000000000
--- a/v2/bin/ansible-galaxy
+++ /dev/null
@@ -1 +0,0 @@
-ansible
\ No newline at end of file
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
deleted file mode 120000
index cabb1f519aa..00000000000
--- a/v2/bin/ansible-playbook
+++ /dev/null
@@ -1 +0,0 @@
-ansible
\ No newline at end of file
diff --git a/v2/bin/ansible-pull b/v2/bin/ansible-pull
deleted file mode 120000
index cabb1f519aa..00000000000
--- a/v2/bin/ansible-pull
+++ /dev/null
@@ -1 +0,0 @@
-ansible
\ No newline at end of file
diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault
deleted file mode 120000
index cabb1f519aa..00000000000
--- a/v2/bin/ansible-vault
+++ /dev/null
@@ -1 +0,0 @@
-ansible
\ No newline at end of file
diff --git a/v2/hacking/README.md b/v2/hacking/README.md
deleted file mode 100644
index 6d65464eee8..00000000000
--- a/v2/hacking/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-'Hacking' directory tools
-=========================
-
-Env-setup
----------
-
-The 'env-setup' script modifies your environment to allow you to run
-ansible from a git checkout using python 2.6+. (You may not use
-python 3 at this time).
-
-First, set up your environment to run from the checkout:
-
- $ source ./hacking/env-setup
-
-You will need some basic prerequisites installed. If you do not already have them
-and do not wish to install them from your operating system package manager, you
-can install them from pip
-
- $ easy_install pip # if pip is not already available
- $ pip install pyyaml jinja2 nose passlib pycrypto
-
-From there, follow ansible instructions on docs.ansible.com as normal.
-
-Test-module
------------
-
-'test-module' is a simple program that allows module developers (or testers) to run
-a module outside of the ansible program, locally, on the current machine.
-
-Example:
-
- $ ./hacking/test-module -m library/commands/shell -a "echo hi"
-
-This is a good way to insert a breakpoint into a module, for instance.
-
-Module-formatter
-----------------
-
-The module formatter is a script used to generate manpages and online
-module documentation. This is used by the system makefiles and rarely
-needs to be run directly.
-
-Authors
--------
-'authors' is a simple script that generates a list of everyone who has
-contributed code to the ansible repository.
-
-
diff --git a/v2/hacking/authors.sh b/v2/hacking/authors.sh
deleted file mode 100755
index 7c97840b2fb..00000000000
--- a/v2/hacking/authors.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# script from http://stackoverflow.com/questions/12133583
-set -e
-
-# Get a list of authors ordered by number of commits
-# and remove the commit count column
-AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f)
-if [ -z "$AUTHORS" ] ; then
- echo "Authors list was empty"
- exit 1
-fi
-
-# Display the authors list and write it to the file
-echo "$AUTHORS" | tee "$(git rev-parse --show-toplevel)/AUTHORS.TXT"
diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup
deleted file mode 100644
index 8f2c331fe46..00000000000
--- a/v2/hacking/env-setup
+++ /dev/null
@@ -1,78 +0,0 @@
-# usage: source hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-
-# Default values for shell variables we use
-PYTHONPATH=${PYTHONPATH-""}
-PATH=${PATH-""}
-MANPATH=${MANPATH-""}
-verbosity=${1-info} # Defaults to `info' if unspecified
-
-if [ "$verbosity" = -q ]; then
- verbosity=silent
-fi
-
-# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE
-if [ -n "$BASH_SOURCE" ] ; then
- HACKING_DIR=$(dirname "$BASH_SOURCE")
-elif [ $(basename -- "$0") = "env-setup" ]; then
- HACKING_DIR=$(dirname "$0")
-# Works with ksh93 but not pdksh
-elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
- HACKING_DIR=$(dirname "${.sh.file}")
-else
- HACKING_DIR="$PWD/hacking"
-fi
-# The below is an alternative to readlink -fn which doesn't exist on OS X
-# Source: http://stackoverflow.com/a/1678636
-FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-ANSIBLE_HOME=$(dirname "$FULL_PATH")
-
-PREFIX_PYTHONPATH="$ANSIBLE_HOME"
-PREFIX_PATH="$ANSIBLE_HOME/bin"
-PREFIX_MANPATH="$ANSIBLE_HOME/docs/man"
-
-expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH"
-expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH"
-expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH"
-
-#
-# Generate egg_info so that pkg_resources works
-#
-
-# Do the work in a function so we don't repeat ourselves later
-gen_egg_info()
-{
- if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
- rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
- fi
- python setup.py egg_info
-}
-
-if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
- current_dir="$PWD"
-else
- current_dir="$ANSIBLE_HOME"
-fi
-cd "$ANSIBLE_HOME"
-#if [ "$verbosity" = silent ] ; then
-# gen_egg_info > /dev/null 2>&1
-#else
-# gen_egg_info
-#fi
-cd "$current_dir"
-
-if [ "$verbosity" != silent ] ; then
- cat <<- EOF
-
- Setting up Ansible to run out of checkout...
-
- PATH=$PATH
- PYTHONPATH=$PYTHONPATH
- MANPATH=$MANPATH
-
- Remember, you may wish to specify your host file with -i
-
- Done!
-
- EOF
-fi
diff --git a/v2/hacking/env-setup.fish b/v2/hacking/env-setup.fish
deleted file mode 100644
index 05fb60672d1..00000000000
--- a/v2/hacking/env-setup.fish
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env fish
-# usage: . ./hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-set HACKING_DIR (dirname (status -f))
-set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-set ANSIBLE_HOME (dirname $FULL_PATH)
-set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib
-set PREFIX_PATH $ANSIBLE_HOME/bin
-set PREFIX_MANPATH $ANSIBLE_HOME/docs/man
-
-# Set PYTHONPATH
-if not set -q PYTHONPATH
- set -gx PYTHONPATH $PREFIX_PYTHONPATH
-else
- switch PYTHONPATH
- case "$PREFIX_PYTHONPATH*"
- case "*"
- echo "Appending PYTHONPATH"
- set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH"
- end
-end
-
-# Set PATH
-if not contains $PREFIX_PATH $PATH
- set -gx PATH $PREFIX_PATH $PATH
-end
-
-# Set MANPATH
-if not contains $PREFIX_MANPATH $MANPATH
- if not set -q MANPATH
- set -gx MANPATH $PREFIX_MANPATH
- else
- set -gx MANPATH $PREFIX_MANPATH $MANPATH
- end
-end
-
-set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library
-
-if set -q argv
- switch $argv
- case '-q' '--quiet'
- case '*'
- echo ""
- echo "Setting up Ansible to run out of checkout..."
- echo ""
- echo "PATH=$PATH"
- echo "PYTHONPATH=$PYTHONPATH"
- echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
- echo "MANPATH=$MANPATH"
- echo ""
-
- echo "Remember, you may wish to specify your host file with -i"
- echo ""
- echo "Done!"
- echo ""
- end
-end
diff --git a/v2/hacking/get_library.py b/v2/hacking/get_library.py
deleted file mode 100755
index 571183b688c..00000000000
--- a/v2/hacking/get_library.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, Will Thames
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-
-import ansible.constants as C
-import sys
-
-def main():
- print C.DEFAULT_MODULE_PATH
- return 0
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/v2/hacking/module_formatter.py b/v2/hacking/module_formatter.py
deleted file mode 100755
index e70eb982de0..00000000000
--- a/v2/hacking/module_formatter.py
+++ /dev/null
@@ -1,442 +0,0 @@
-#!/usr/bin/env python
-# (c) 2012, Jan-Piet Mens
-# (c) 2012-2014, Michael DeHaan and others
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-
-import os
-import glob
-import sys
-import yaml
-import codecs
-import json
-import ast
-import re
-import optparse
-import time
-import datetime
-import subprocess
-import cgi
-from jinja2 import Environment, FileSystemLoader
-
-import ansible.utils
-import ansible.utils.module_docs as module_docs
-
-#####################################################################################
-# constants and paths
-
-# if a module is added in a version of Ansible older than this, don't print the version added information
-# in the module documentation because everyone is assumed to be running something newer than this already.
-TO_OLD_TO_BE_NOTABLE = 1.0
-
-# Get parent directory of the directory this script lives in
-MODULEDIR=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
-))
-
-# The name of the DOCUMENTATION template
-EXAMPLE_YAML=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
-))
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-
-DEPRECATED = " (D)"
-NOTCORE = " (E)"
-#####################################################################################
-
-def rst_ify(text):
- ''' convert symbols like I(this is in italics) to valid restructured text '''
-
- t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
- t = _BOLD.sub(r'**' + r"\1" + r"**", t)
- t = _MODULE.sub(r'``' + r"\1" + r"``", t)
- t = _URL.sub(r"\1", t)
- t = _CONST.sub(r'``' + r"\1" + r"``", t)
-
- return t
-
-#####################################################################################
-
-def html_ify(text):
- ''' convert symbols like I(this is in italics) to valid HTML '''
-
- t = cgi.escape(text)
- t = _ITALIC.sub("" + r"\1" + "", t)
- t = _BOLD.sub("" + r"\1" + "", t)
- t = _MODULE.sub("" + r"\1" + "", t)
- t = _URL.sub("" + r"\1" + "", t)
- t = _CONST.sub("" + r"\1" + "
", t)
-
- return t
-
-
-#####################################################################################
-
-def rst_fmt(text, fmt):
- ''' helper for Jinja2 to do format strings '''
-
- return fmt % (text)
-
-#####################################################################################
-
-def rst_xline(width, char="="):
- ''' return a restructured text line of a given length '''
-
- return char * width
-
-#####################################################################################
-
-def write_data(text, options, outputname, module):
- ''' dumps module output to a file or the screen, as requested '''
-
- if options.output_dir is not None:
- fname = os.path.join(options.output_dir, outputname % module)
- fname = fname.replace(".py","")
- f = open(fname, 'w')
- f.write(text.encode('utf-8'))
- f.close()
- else:
- print text
-
-#####################################################################################
-
-
-def list_modules(module_dir, depth=0):
- ''' returns a hash of categories, each category being a hash of module names to file paths '''
-
- categories = dict(all=dict(),_aliases=dict())
- if depth <= 3: # limit # of subdirs
-
- files = glob.glob("%s/*" % module_dir)
- for d in files:
-
- category = os.path.splitext(os.path.basename(d))[0]
- if os.path.isdir(d):
-
- res = list_modules(d, depth + 1)
- for key in res.keys():
- if key in categories:
- categories[key] = ansible.utils.merge_hash(categories[key], res[key])
- res.pop(key, None)
-
- if depth < 2:
- categories.update(res)
- else:
- category = module_dir.split("/")[-1]
- if not category in categories:
- categories[category] = res
- else:
- categories[category].update(res)
- else:
- module = category
- category = os.path.basename(module_dir)
- if not d.endswith(".py") or d.endswith('__init__.py'):
- # windows powershell modules have documentation stubs in python docstring
- # format (they are not executed) so skip the ps1 format files
- continue
- elif module.startswith("_") and os.path.islink(d):
- source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
- module = module.replace("_","",1)
- if not d in categories['_aliases']:
- categories['_aliases'][source] = [module]
- else:
- categories['_aliases'][source].update(module)
- continue
-
- if not category in categories:
- categories[category] = {}
- categories[category][module] = d
- categories['all'][module] = d
-
- return categories
-
-#####################################################################################
-
-def generate_parser():
- ''' generate an optparse parser '''
-
- p = optparse.OptionParser(
- version='%prog 1.0',
- usage='usage: %prog [options] arg1 arg2',
- description='Generate module documentation from metadata',
- )
-
- p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
- p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
- p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
- p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
- p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
- p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
- p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
- p.add_option('-V', action='version', help='Show version number and exit')
- return p
-
-#####################################################################################
-
-def jinja2_environment(template_dir, typ):
-
- env = Environment(loader=FileSystemLoader(template_dir),
- variable_start_string="@{",
- variable_end_string="}@",
- trim_blocks=True,
- )
- env.globals['xline'] = rst_xline
-
- if typ == 'rst':
- env.filters['convert_symbols_to_format'] = rst_ify
- env.filters['html_ify'] = html_ify
- env.filters['fmt'] = rst_fmt
- env.filters['xline'] = rst_xline
- template = env.get_template('rst.j2')
- outputname = "%s_module.rst"
- else:
- raise Exception("unknown module format type: %s" % typ)
-
- return env, template, outputname
-
-#####################################################################################
-
-def process_module(module, options, env, template, outputname, module_map, aliases):
-
- fname = module_map[module]
- if isinstance(fname, dict):
- return "SKIPPED"
-
- basename = os.path.basename(fname)
- deprecated = False
-
- # ignore files with extensions
- if not basename.endswith(".py"):
- return
- elif module.startswith("_"):
- if os.path.islink(fname):
- return # ignore, its an alias
- deprecated = True
- module = module.replace("_","",1)
-
- print "rendering: %s" % module
-
- # use ansible core library to parse out doc metadata YAML and plaintext examples
- doc, examples, returndocs = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose)
-
- # crash if module is missing documentation and not explicitly hidden from docs index
- if doc is None:
- if module in ansible.utils.module_docs.BLACKLIST_MODULES:
- return "SKIPPED"
- else:
- sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if deprecated and 'deprecated' not in doc:
- sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if "/core/" in fname:
- doc['core'] = True
- else:
- doc['core'] = False
-
- if module in aliases:
- doc['aliases'] = aliases[module]
-
- all_keys = []
-
- if not 'version_added' in doc:
- sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
- sys.exit(1)
-
- added = 0
- if doc['version_added'] == 'historical':
- del doc['version_added']
- else:
- added = doc['version_added']
-
- # don't show version added information if it's too old to be called out
- if added:
- added_tokens = str(added).split(".")
- added = added_tokens[0] + "." + added_tokens[1]
- added_float = float(added)
- if added and added_float < TO_OLD_TO_BE_NOTABLE:
- del doc['version_added']
-
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
-
- all_keys = sorted(all_keys)
-
- doc['option_keys'] = all_keys
- doc['filename'] = fname
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['ansible_version'] = options.ansible_version
- doc['plainexamples'] = examples #plain text
-
- # here is where we build the table of contents...
-
- text = template.render(doc)
- write_data(text, options, outputname, module)
- return doc['short_description']
-
-#####################################################################################
-
-def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
- modstring = module
- modname = module
- if module in deprecated:
- modstring = modstring + DEPRECATED
- modname = "_" + module
- elif module not in core:
- modstring = modstring + NOTCORE
-
- result = process_module(modname, options, env, template, outputname, module_map, aliases)
-
- if result != "SKIPPED":
- category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
-
-def process_category(category, categories, options, env, template, outputname):
-
- module_map = categories[category]
-
- aliases = {}
- if '_aliases' in categories:
- aliases = categories['_aliases']
-
- category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
- category_file = open(category_file_path, "w")
- print "*** recording category %s in %s ***" % (category, category_file_path)
-
- # TODO: start a new category file
-
- category = category.replace("_"," ")
- category = category.title()
-
- modules = []
- deprecated = []
- core = []
- for module in module_map.keys():
-
- if isinstance(module_map[module], dict):
- for mod in module_map[module].keys():
- if mod.startswith("_"):
- mod = mod.replace("_","",1)
- deprecated.append(mod)
- elif '/core/' in module_map[module][mod]:
- core.append(mod)
- else:
- if module.startswith("_"):
- module = module.replace("_","",1)
- deprecated.append(module)
- elif '/core/' in module_map[module]:
- core.append(module)
-
- modules.append(module)
-
- modules.sort()
-
- category_header = "%s Modules" % (category.title())
- underscores = "`" * len(category_header)
-
- category_file.write("""\
-%s
-%s
-
-.. toctree:: :maxdepth: 1
-
-""" % (category_header, underscores))
- sections = []
- for module in modules:
- if module in module_map and isinstance(module_map[module], dict):
- sections.append(module)
- continue
- else:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases)
-
- sections.sort()
- for section in sections:
- category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
- category_file.write(".. toctree:: :maxdepth: 1\n\n")
-
- section_modules = module_map[section].keys()
- section_modules.sort()
- #for module in module_map[section]:
- for module in section_modules:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases)
-
- category_file.write("""\n\n
-.. note::
- - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less activity maintained than 'core' modules.
- - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_
-""" % (DEPRECATED, NOTCORE))
- category_file.close()
-
- # TODO: end a new category file
-
-#####################################################################################
-
-def validate_options(options):
- ''' validate option parser options '''
-
- if not options.module_dir:
- print >>sys.stderr, "--module-dir is required"
- sys.exit(1)
- if not os.path.exists(options.module_dir):
- print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir
- sys.exit(1)
- if not options.template_dir:
- print "--template-dir must be specified"
- sys.exit(1)
-
-#####################################################################################
-
-def main():
-
- p = generate_parser()
-
- (options, args) = p.parse_args()
- validate_options(options)
-
- env, template, outputname = jinja2_environment(options.template_dir, options.type)
-
- categories = list_modules(options.module_dir)
- last_category = None
- category_names = categories.keys()
- category_names.sort()
-
- category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
- category_list_file = open(category_list_path, "w")
- category_list_file.write("Module Index\n")
- category_list_file.write("============\n")
- category_list_file.write("\n\n")
- category_list_file.write(".. toctree::\n")
- category_list_file.write(" :maxdepth: 1\n\n")
-
- for category in category_names:
- if category.startswith("_"):
- continue
- category_list_file.write(" list_of_%s_modules\n" % category)
- process_category(category, categories, options, env, template, outputname)
-
- category_list_file.close()
-
-if __name__ == '__main__':
- main()
diff --git a/v2/hacking/templates/rst.j2 b/v2/hacking/templates/rst.j2
deleted file mode 100644
index 59b8f35474c..00000000000
--- a/v2/hacking/templates/rst.j2
+++ /dev/null
@@ -1,153 +0,0 @@
-.. _@{ module }@:
-
-{% if short_description %}
-{% set title = module + ' - ' + short_description|convert_symbols_to_format %}
-{% else %}
-{% set title = module %}
-{% endif %}
-{% set title_len = title|length %}
-
-@{ title }@
-@{ '+' * title_len }@
-
-.. contents::
- :local:
- :depth: 1
-
-{# ------------------------------------------
- #
- # Please note: this looks like a core dump
- # but it isn't one.
- #
- --------------------------------------------#}
-
-{% if aliases is defined -%}
-Aliases: @{ ','.join(aliases) }@
-{% endif %}
-
-{% if deprecated is defined -%}
-DEPRECATED
-----------
-
-@{ deprecated }@
-{% endif %}
-
-Synopsis
---------
-
-{% if version_added is defined -%}
-.. versionadded:: @{ version_added }@
-{% endif %}
-
-{% for desc in description -%}
-@{ desc | convert_symbols_to_format }@
-{% endfor %}
-
-{% if options -%}
-Options
--------
-
-.. raw:: html
-
-
-
- parameter |
- required |
- default |
- choices |
- comments |
-
- {% for k in option_keys %}
- {% set v = options[k] %}
-
- @{ k }@ |
- {% if v.get('required', False) %}yes{% else %}no{% endif %} |
- {% if v['default'] %}@{ v['default'] }@{% endif %} |
- {% if v.get('type', 'not_bool') == 'bool' %}
- |
- {% else %}
- {% for choice in v.get('choices',[]) -%}- @{ choice }@
{% endfor -%} |
- {% endif %}
- {% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %} |
-
- {% endfor %}
-
-{% endif %}
-
-{% if requirements %}
-{% for req in requirements %}
-
-.. note:: Requires @{ req | convert_symbols_to_format }@
-
-{% endfor %}
-{% endif %}
-
-{% if examples or plainexamples %}
-Examples
---------
-
-.. raw:: html
-
-{% for example in examples %}
- {% if example['description'] %}@{ example['description'] | html_ify }@
{% endif %}
-
-
-@{ example['code'] | escape | indent(4, True) }@
-
-
-{% endfor %}
-
-
-{% if plainexamples %}
-
-::
-
-@{ plainexamples | indent(4, True) }@
-{% endif %}
-{% endif %}
-
-{% if notes %}
-{% for note in notes %}
-.. note:: @{ note | convert_symbols_to_format }@
-{% endfor %}
-{% endif %}
-
-
-{% if not deprecated %}
- {% if core %}
-
-This is a Core Module
----------------------
-
-This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos.
-
- {% else %}
-
-This is an Extras Module
-------------------------
-
-This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests.
-Popular "extras" modules may be promoted to core modules over time.
-
- {% endif %}
-{% endif %}
-
-For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`.
-
-
diff --git a/v2/hacking/test-module b/v2/hacking/test-module
deleted file mode 100755
index b672e23e260..00000000000
--- a/v2/hacking/test-module
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-#
-
-# this script is for testing modules without running through the
-# entire guts of ansible, and is very helpful for when developing
-# modules
-#
-# example:
-# test-module -m ../library/commands/command -a "/bin/sleep 3"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb
-# test-modulr -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
-
-import sys
-import base64
-import os
-import subprocess
-import traceback
-import optparse
-
-from ansible import utils
-from ansible import module_common
-import ansible.constants as C
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-def parse():
- """parse command line
-
- :return : (options, args)"""
- parser = optparse.OptionParser()
-
- parser.usage = "%prog -[options] (-h for help)"
-
- parser.add_option('-m', '--module-path', dest='module_path',
- help="REQUIRED: full path of module source to execute")
- parser.add_option('-a', '--args', dest='module_args', default="",
- help="module argument string")
- parser.add_option('-D', '--debugger', dest='debugger',
- help="path to python debugger (e.g. /usr/bin/pdb)")
- parser.add_option('-I', '--interpreter', dest='interpreter',
- help="path to interpeter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
- metavar='INTERPRETER_TYPE=INTERPRETER_PATH')
- parser.add_option('-c', '--check', dest='check', action='store_true',
- help="run the module in check mode")
- options, args = parser.parse_args()
- if not options.module_path:
- parser.print_help()
- sys.exit(1)
- else:
- return options, args
-
-def write_argsfile(argstring, json=False):
- """ Write args to a file for old-style module's use. """
- argspath = os.path.expanduser("~/.ansible_test_module_arguments")
- argsfile = open(argspath, 'w')
- if json:
- args = utils.parse_kv(argstring)
- argstring = utils.jsonify(args)
- argsfile.write(argstring)
- argsfile.close()
- return argspath
-
-def boilerplate_module(modfile, args, interpreter, check):
- """ simulate what ansible does with new style modules """
-
- #module_fh = open(modfile)
- #module_data = module_fh.read()
- #module_fh.close()
-
- #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
-
- complex_args = {}
- if args.startswith("@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
- args=''
- elif args.startswith("{"):
- # Argument is a YAML document (not a file)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
- args=''
-
- inject = {}
- if interpreter:
- if '=' not in interpreter:
- print 'interpeter must by in the form of ansible_python_interpreter=/usr/bin/python'
- sys.exit(1)
- interpreter_type, interpreter_path = interpreter.split('=')
- if not interpreter_type.startswith('ansible_'):
- interpreter_type = 'ansible_%s' % interpreter_type
- if not interpreter_type.endswith('_interpreter'):
- interpreter_type = '%s_interpreter' % interpreter_type
- inject[interpreter_type] = interpreter_path
-
- if check:
- complex_args['CHECKMODE'] = True
-
- (module_data, module_style, shebang) = module_common.modify_module(
- modfile,
- complex_args,
- args,
- inject
- )
-
- modfile2_path = os.path.expanduser("~/.ansible_module_generated")
- print "* including generated source, if any, saving to: %s" % modfile2_path
- print "* this may offset any line numbers in tracebacks/debuggers!"
- modfile2 = open(modfile2_path, 'w')
- modfile2.write(module_data)
- modfile2.close()
- modfile = modfile2_path
-
- return (modfile2_path, module_style)
-
-def runtest( modfile, argspath):
- """Test run a module, piping it's output for reporting."""
-
- os.system("chmod +x %s" % modfile)
-
- invoke = "%s" % (modfile)
- if argspath is not None:
- invoke = "%s %s" % (modfile, argspath)
-
- cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
-
- try:
- print "***********************************"
- print "RAW OUTPUT"
- print out
- print err
- results = utils.parse_json(out)
- except:
- print "***********************************"
- print "INVALID OUTPUT FORMAT"
- print out
- traceback.print_exc()
- sys.exit(1)
-
- print "***********************************"
- print "PARSED OUTPUT"
- print utils.jsonify(results,format=True)
-
-def rundebug(debugger, modfile, argspath):
- """Run interactively with console debugger."""
-
- if argspath is not None:
- subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True)
- else:
- subprocess.call("%s %s" % (debugger, modfile), shell=True)
-
-def main():
-
- options, args = parse()
- (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check)
-
- argspath=None
- if module_style != 'new':
- if module_style == 'non_native_want_json':
- argspath = write_argsfile(options.module_args, json=True)
- elif module_style == 'old':
- argspath = write_argsfile(options.module_args, json=False)
- else:
- raise Exception("internal error, unexpected module style: %s" % module_style)
- if options.debugger:
- rundebug(options.debugger, modfile, argspath)
- else:
- runtest(modfile, argspath)
-
-if __name__ == "__main__":
- main()
-
diff --git a/v2/scripts/ansible b/v2/scripts/ansible
deleted file mode 100644
index ae8ccff5952..00000000000
--- a/v2/scripts/ansible
+++ /dev/null
@@ -1,20 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
diff --git a/v2/setup.py b/v2/setup.py
deleted file mode 100644
index e982c382f29..00000000000
--- a/v2/setup.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-from ansible import __version__
-try:
- from setuptools import setup, find_packages
-except ImportError:
- print("Ansible now needs setuptools in order to build. Install it using"
- " your package manager (usually python-setuptools) or via pip (pip"
- " install setuptools).")
- sys.exit(1)
-
-setup(name='ansible',
- version=__version__,
- description='Radically simple IT automation',
- author='Michael DeHaan',
- author_email='michael@ansible.com',
- url='http://ansible.com/',
- license='GPLv3',
- install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six >= 1.4.0'],
- # package_dir={ '': 'lib' },
- # packages=find_packages('lib'),
- package_data={
- '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'],
- },
- scripts=[
- 'bin/ansible',
- 'bin/ansible-playbook',
- # 'bin/ansible-pull',
- # 'bin/ansible-doc',
- # 'bin/ansible-galaxy',
- # 'bin/ansible-vault',
- ],
- data_files=[],
-)
diff --git a/v2/test/mock/__init__.py b/v2/test/mock/__init__.py
deleted file mode 100644
index ae8ccff5952..00000000000
--- a/v2/test/mock/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type