From d2db7bad1bed5c00cee4f05852bff1c177040bb5 Mon Sep 17 00:00:00 2001 From: sysadmin75 <krisnewsome@gmail.com> Date: Mon, 1 Jun 2015 13:23:28 -0400 Subject: [PATCH 0001/1113] Fixes OSX fact gathering for the bridge interface. Issue #11104 --- lib/ansible/module_utils/facts.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 1162e05b9cf..f65f776a242 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2163,7 +2163,13 @@ class DarwinNetwork(GenericBsdIfconfigNetwork, Network): current_if['media'] = 'Unknown' # Mac does not give us this current_if['media_select'] = words[1] if len(words) > 2: - current_if['media_type'] = words[2][1:-1] + # MacOSX sets the media to '<unknown type>' for bridge interface + # and parsing splits this into two words; this if/else helps + if words[1] == '<unknown' and words[2] == 'type>': + current_if['media_select'] = 'Unknown' + current_if['media_type'] = 'unknown type' + else: + current_if['media_type'] = words[2][1:-1] if len(words) > 3: current_if['media_options'] = self.get_options(words[3]) From 7a3519bbaa7da3166504505638d1270bd675e7f1 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser <quentin@pobox.com> Date: Tue, 21 Jul 2015 19:25:00 +0100 Subject: [PATCH 0002/1113] Documentation for inventory ignored extensions --- docsite/rst/intro_dynamic_inventory.rst | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 5b634d86cd9..0de7abb80fc 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -218,13 +218,21 @@ to include it in the project. .. _using_multiple_sources: -Using Multiple Inventory Sources -```````````````````````````````` +Using Inventory Directories and Multiple Inventory Sources +`````````````````````````````````````````````````````````` If the location given to -i in Ansible is a directory (or as so configured in ansible.cfg), Ansible can use multiple inventory sources at the same time. When doing so, it is possible to mix both dynamic and statically managed inventory sources in the same ansible run. Instant hybrid cloud! +In an inventory directory, executable files will be treated as dynamic inventory sources and most other files as static sources. Files which end with any of the following will be ignored:: + + ~, .orig, .bak, .ini, .retry, .pyc, .pyo + +You can replace this list with your own selection by configuring an ``inventory_ignore_extensions`` list in ansible.cfg, or setting the ANSIBLE_INVENTORY_IGNORE environment variable. The value in either case should be a comma-separated list of patterns, as shown above. + +Any ``group_vars`` and ``host_vars`` subdirectories in and inventory directory will be interpreted as expected, making inventory directories a powerful way to organize different sets of configurations. + .. 
_static_groups_of_dynamic: Static Groups of Dynamic Groups From dfd19d6bd8141447135480ac05df25d765e95772 Mon Sep 17 00:00:00 2001 From: Quentin Stafford-Fraser <quentin@pobox.com> Date: Tue, 21 Jul 2015 19:38:49 +0100 Subject: [PATCH 0003/1113] Fix typo in docs --- docsite/rst/intro_dynamic_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 0de7abb80fc..729a6ba5172 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -231,7 +231,7 @@ In an inventory directory, executable files will be treated as dynamic inventory You can replace this list with your own selection by configuring an ``inventory_ignore_extensions`` list in ansible.cfg, or setting the ANSIBLE_INVENTORY_IGNORE environment variable. The value in either case should be a comma-separated list of patterns, as shown above. -Any ``group_vars`` and ``host_vars`` subdirectories in and inventory directory will be interpreted as expected, making inventory directories a powerful way to organize different sets of configurations. +Any ``group_vars`` and ``host_vars`` subdirectories in an inventory directory will be interpreted as expected, making inventory directories a powerful way to organize different sets of configurations. .. _static_groups_of_dynamic: From 009d0a4bb44dfc299b2212e322a4b93e16d60a4a Mon Sep 17 00:00:00 2001 From: Andy Grimm <agrimm@redhat.com> Date: Mon, 9 Mar 2015 10:49:54 -0400 Subject: [PATCH 0004/1113] Flexible tag-based naming for ec2 hosts Introduces destination_format and destination_format_tags to allow the construction of host names based on one or more ec2 tags and a python format string. --- contrib/inventory/ec2.ini | 10 ++++++++++ contrib/inventory/ec2.py | 11 ++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini index a1d9b1d805d..5bac496ff59 100644 --- a/contrib/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -40,6 +40,16 @@ destination_variable = public_dns_name # vpc_destination_variable = 'private_ip_address' vpc_destination_variable = ip_address +# The following two settings allow flexible ansible host naming based on a +# python format string and a comma-separated list of ec2 tags. Note that: +# +# 1) If the tags referenced are not present for some instances, empty strings +# will be substituted in the format string. +# 2) This overrides both destination_variable and vpc_destination_variable. +# +#destination_format = {0}.{1}.example.com +#destination_format_tags = Name,environment + # To tag instances on EC2 with the resource records that point to them from # Route53, uncomment and set 'route53' to True. 
route53 = False diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index a8e042e3f4b..8c8e5e94589 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -220,6 +220,13 @@ class Ec2Inventory(object): # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + if config.has_option('ec2', 'destination_format') and \ + config.has_option('ec2', 'destination_format_tags'): + self.destination_format = config.get('ec2', 'destination_format') + self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') + else: + self.destination_format = None + self.destination_format_tags = None # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') @@ -536,7 +543,9 @@ class Ec2Inventory(object): return # Select the best destination address - if instance.subnet_id: + if self.destination_format and self.destination_format_tags: + dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ]) + elif instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) From be452c1b2792bd15c37b2f418417eeaee4632f3e Mon Sep 17 00:00:00 2001 From: Nathaniel Cohen <ncohen@ucsd.edu> Date: Mon, 14 Sep 2015 14:47:44 -0700 Subject: [PATCH 0005/1113] allow ConfigureRemotingForAnsible.ps1 script to function from 'public' adapters The current script fails on machines which have network interfaces designated as connected to "Public" networks (choices for network designation being Private, Domain, Public). This commit changes the script to NOT prevent winrm initialization when device is connected to a "Public" network. --- examples/scripts/ConfigureRemotingForAnsible.ps1 | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index a67ea8afb2c..2555b2e97a3 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -1,10 +1,10 @@ -# Configure a Windows host for remote management with Ansible +# Configure a Windows host for remote management with Ansible # ----------------------------------------------------------- # # This script checks the current WinRM/PSRemoting configuration and makes the # necessary changes to allow Ansible to connect, authenticate and execute # PowerShell commands. -# +# # Set $VerbosePreference = "Continue" before running the script in order to # see the output messages. # @@ -17,6 +17,7 @@ Param ( [string]$SubjectName = $env:COMPUTERNAME, [int]$CertValidityDays = 365, + [switch]$SkipNetworkProfileCheck, $CreateSelfSignedCert = $true ) @@ -27,7 +28,7 @@ Function New-LegacySelfSignedCert [string]$SubjectName, [int]$ValidDays = 365 ) - + $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1" $name.Encode("CN=$SubjectName", 0) @@ -97,8 +98,14 @@ ElseIf ((Get-Service "WinRM").Status -ne "Running") # WinRM should be running; check that we have a PS session config. If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) { - Write-Verbose "Enabling PS Remoting." + if ($SkipNetworkProfileCheck) { + Write-Verbose "Enabling PS Remoting without checking Network profile." 
+ Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop + } + else { + Write-Verbose "Enabling PS Remoting" Enable-PSRemoting -Force -ErrorAction Stop + } } Else { From 8b6f8ff92898f7e1fd9b9db5d71dd1673262402d Mon Sep 17 00:00:00 2001 From: Nathaniel Cohen <ncohen@ucsd.edu> Date: Tue, 22 Sep 2015 11:57:15 -0700 Subject: [PATCH 0006/1113] Document -SkipNetworkProfileCheck switch --- examples/scripts/ConfigureRemotingForAnsible.ps1 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index 2555b2e97a3..bc8f3898263 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -7,6 +7,10 @@ # # Set $VerbosePreference = "Continue" before running the script in order to # see the output messages. +# Set $SkipNetworkProfileCheck to skip the network profile check. Without +# specifying this the script will only run if the device's interfaces are in +# DOMAIN or PRIVATE zones. Provide this switch if you want to enable winrm on +# a device with an interface in PUBLIC zone. # # Written by Trond Hindenes <trond@hindenes.com> # Updated by Chris Church <cchurch@ansible.com> From fc0801e69bd2615c0bd254ecf2bbe58b44b81eaf Mon Sep 17 00:00:00 2001 From: Nathaniel Cohen <ncohen@ucsd.edu> Date: Tue, 22 Sep 2015 12:45:02 -0700 Subject: [PATCH 0007/1113] describe command line options for ConfigureAnsibleForRemoting --- docsite/rst/intro_windows.rst | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 6e3cb5bc60d..89d4cc31504 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -8,7 +8,7 @@ Windows Support Windows: How Does It Work ````````````````````````` -As you may have already read, Ansible manages Linux/Unix machines using SSH by default. +As you may have already read, Ansible manages Linux/Unix machines using SSH by default. Starting in version 1.7, Ansible also contains support for managing Windows machines. This uses native PowerShell remoting, rather than SSH. @@ -40,22 +40,22 @@ Installing python-kerberos dependencies # Via Yum yum -y install python-devel krb5-devel krb5-libs krb5-workstation - + # Via Apt (Ubuntu) sudo apt-get install python-dev libkrb5-dev - + # Via Portage (Gentoo) - emerge -av app-crypt/mit-krb5 + emerge -av app-crypt/mit-krb5 emerge -av dev-python/setuptools # Via pkg (FreeBSD) sudo pkg install security/krb5 - + # Via OpenCSW (Solaris) pkgadd -d http://get.opencsw.org/now /opt/csw/bin/pkgutil -U - /opt/csw/bin/pkgutil -y -i libkrb5_3 - + /opt/csw/bin/pkgutil -y -i libkrb5_3 + # Via Pacman (Arch Linux) pacman -S krb5 @@ -115,18 +115,23 @@ Windows System Prep In order for Ansible to manage your windows machines, you will have to enable PowerShell remoting configured. -To automate setup of WinRM, you can run `this PowerShell script <https://github.com/ansible/ansible/blob/devel/examples/scripts/ConfigureRemotingForAnsible.ps1>`_ on the remote machine. +To automate setup of WinRM, you can run `this PowerShell script <https://github.com/ansible/ansible/blob/devel/examples/scripts/ConfigureRemotingForAnsible.ps1>`_ on the remote machine. -Admins may wish to modify this setup slightly, for instance to increase the timeframe of -the certificate. 
+The example script accepts a few arguments which Admins may choose to use to modify the default setup slightly, which might be appropriate in some cases. + +Pass the -CertValidityDays option to customize the expiration date of the generated certificate. + powershell.exe -File ConfigureRemotingForAnsible.ps1 -CertValidityDays 100 + +Pass the -SkipNetworkProfileCheck switch to configure winrm to listen on PUBLIC zone interfaces. (Without this option, the script will fail if any network interface on device is in PUBLIC zone) + powershell.exe -File ConfigureRemotingForAnsible.ps1 -SkipNetworkProfileCheck .. note:: - On Windows 7 and Server 2008 R2 machines, due to a bug in Windows + On Windows 7 and Server 2008 R2 machines, due to a bug in Windows Management Framework 3.0, it may be necessary to install this hotfix http://support.microsoft.com/kb/2842230 to avoid receiving out of memory and stack overflow exceptions. Newly-installed Server 2008 R2 systems which are not fully up to date with windows updates are known - to have this issue. + to have this issue. Windows 8.1 and Server 2012 R2 are not affected by this issue as they come with Windows Management Framework 4.0. @@ -145,8 +150,8 @@ Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 <h What modules are available `````````````````````````` -Most of the Ansible modules in core Ansible are written for a combination of Linux/Unix machines and arbitrary web services, though there are various -Windows modules as listed in the `"windows" subcategory of the Ansible module index <http://docs.ansible.com/list_of_windows_modules.html>`_. +Most of the Ansible modules in core Ansible are written for a combination of Linux/Unix machines and arbitrary web services, though there are various +Windows modules as listed in the `"windows" subcategory of the Ansible module index <http://docs.ansible.com/list_of_windows_modules.html>`_. Browse this index to see what is available. @@ -275,5 +280,3 @@ form of new modules, tweaks to existing modules, documentation, or something els Questions? Help? Ideas? Stop by the list on Google Groups `irc.freenode.net <http://irc.freenode.net>`_ #ansible IRC chat channel - - From fa332e1342ff99665504f036c82cf90f2fa04433 Mon Sep 17 00:00:00 2001 From: George Sudarkoff <georges@surveymonkey.com> Date: Wed, 7 Oct 2015 10:32:50 -0700 Subject: [PATCH 0008/1113] Fail if the vault password script returns non-zero. --- lib/ansible/cli/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index bf5e33e6be8..9391d386fb4 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -501,6 +501,8 @@ class CLI(object): except OSError as e: raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError("Vault password script %s returned non-zero (%s)." % (this_path, p.returncode)) vault_pass = stdout.strip('\r\n') else: try: From 00ccd2ee6e7ced5b5d7556ed466979ceeacfe3a2 Mon Sep 17 00:00:00 2001 From: Bernhard Lichtinger <bernhard.lichtinger@lrz.de> Date: Mon, 26 Oct 2015 14:22:21 +0100 Subject: [PATCH 0009/1113] Fix for SLES 11.4, which has now also an /etc/os-release file. 
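SLES writes the os-release VERSION_ID line with or without a minor number, which is what the tightened check in this change accounts for. A minimal sketch of that extraction, with sample lines assumed for illustration:

    import re

    # Hypothetical /etc/os-release VERSION_ID lines as SLES might write them
    for line in ('VERSION_ID="11.4"', 'VERSION_ID="12"'):
        release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
        # An empty minor-version group now falls back to "0"
        minor = release.group(1) if release.group(1) else "0"
        print(minor)  # prints 4, then 0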
--- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 1ba61bb77f0..4c7571ce3c5 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -418,9 +418,9 @@ class Facts(object): release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line) if release: self.facts['distribution_release'] = release.groups()[0] - elif 'enterprise' in data.lower(): + elif 'enterprise' in data.lower() and 'VERSION_ID' in line: release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't got funny release names - if release: + if release.group(1): release = release.group(1) else: release = "0" # no minor number, so it is the first release From 0bc32cbaeea54a0d27ab2654d4d9eb43064cf735 Mon Sep 17 00:00:00 2001 From: Florian Haas <florian@hastexo.com> Date: Thu, 12 Nov 2015 21:19:40 +0100 Subject: [PATCH 0010/1113] Correct connection type returned by libvirt_lxc inventory script The correct connection type for LXC containers managed via libvirt is libvirt_lxc, not lxc. --- contrib/inventory/libvirt_lxc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py index 1491afd577d..cb34d473cda 100755 --- a/contrib/inventory/libvirt_lxc.py +++ b/contrib/inventory/libvirt_lxc.py @@ -27,11 +27,11 @@ result['all'] = {} pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} -result['all']['vars']['ansible_connection'] = 'lxc' +result['all']['vars']['ansible_connection'] = 'libvirt_lxc' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print(json.dumps({'ansible_connection': 'lxc'})) + print(json.dumps({'ansible_connection': 'libvirt_lxc'})) else: print("Need an argument, either --list or --host <host>") From 1b76a9cef2d74eba9fd786e43f1cf3364a8ac501 Mon Sep 17 00:00:00 2001 From: Jonathan Davila <jdavila@ansible.com> Date: Fri, 13 Nov 2015 18:19:09 -0700 Subject: [PATCH 0011/1113] Patch to remove dependency on boto when only using boto3 Updated with explicit check for HAS_BOTO3 --- lib/ansible/module_utils/ec2.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index ac799772c2c..2edfd9e5d83 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -29,6 +29,7 @@ import os try: import boto3 + import botocore HAS_BOTO3 = True except: HAS_BOTO3 = False @@ -129,10 +130,14 @@ def get_aws_connection_info(module, boto3=False): elif 'EC2_REGION' in os.environ: region = os.environ['EC2_REGION'] else: - # boto.config.get returns None if config not found - region = boto.config.get('Boto', 'aws_region') - if not region: - region = boto.config.get('Boto', 'ec2_region') + if not boto3: + # boto.config.get returns None if config not found + region = boto.config.get('Boto', 'aws_region') + if not region: + region = boto.config.get('Boto', 'ec2_region') + elif boto3 and HAS_BOTO3: + # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. 
+ region = botocore.session.get_session().get_config_variable('region') if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: From 9761250a4b179d2064a49e2f4b4a66ba423de26f Mon Sep 17 00:00:00 2001 From: Mick Bass <mick.bass@47lining.com> Date: Sun, 27 Sep 2015 17:12:13 -0600 Subject: [PATCH 0012/1113] Allow tree-ish to be used for galaxy role version Ensure that ansible-galaxy version can be a branch, a tag, or any tree-ish supported by git including specific commit IDs. For git scm roles, adds an explicit git checkout of the specified role_version prior to the git archive. This means that we'll always archive from HEAD of whatever role_version is checked out. role_version can be a branch, a tag, or any <tree-ish> supported by git including specific commit IDs. These changes also ensure ansible-galaxy works for scm clones when specified version differs from repository default branch. --- lib/ansible/galaxy/role.py | 2 -- lib/ansible/playbook/role/requirement.py | 11 +++++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index dc9da5d79ce..5acd26c92dc 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -310,5 +310,3 @@ class GalaxyRole(object): } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name) - - diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py index 1a640247e25..807dd1e82fd 100644 --- a/lib/ansible/playbook/role/requirement.py +++ b/lib/ansible/playbook/role/requirement.py @@ -190,6 +190,17 @@ class RoleRequirement(RoleDefinition): if rc != 0: raise AnsibleError ("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc)) + if scm == 'git' and version: + checkout_cmd = [scm, 'checkout', version] + with open('/dev/null', 'w') as devnull: + try: + popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull) + except (IOError, OSError): + raise AnsibleError("error executing: %s" % " ".join(checkout_cmd)) + rc = popen.wait() + if rc != 0: + raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc)) + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar') if scm == 'hg': archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name] From ff19233ad33dc989e997e69b4f36cab56fae74da Mon Sep 17 00:00:00 2001 From: Will Thames <will@thames.id.au> Date: Thu, 22 Oct 2015 14:18:48 +1000 Subject: [PATCH 0013/1113] Add tests for #10620 --- test/integration/galaxy_roles.yml | 2 +- test/integration/galaxy_rolesfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/galaxy_roles.yml b/test/integration/galaxy_roles.yml index 5f4373c5004..3d2121f1683 100644 --- a/test/integration/galaxy_roles.yml +++ b/test/integration/galaxy_roles.yml @@ -3,7 +3,7 @@ name: oracle_java7 - src: git+http://bitbucket.org/willthames/git-ansible-galaxy - version: v1.6 + version: pr-10620 - src: http://bitbucket.org/willthames/hg-ansible-galaxy scm: hg diff --git a/test/integration/galaxy_rolesfile b/test/integration/galaxy_rolesfile index b78cdc11481..047eef95502 100644 --- a/test/integration/galaxy_rolesfile +++ b/test/integration/galaxy_rolesfile @@ -1,7 +1,7 @@ # deliberate non-empty whitespace line to follow -git+https://bitbucket.org/willthames/git-ansible-galaxy,v1.6 +git+https://bitbucket.org/willthames/git-ansible-galaxy,pr-10620 hg+https://bitbucket.org/willthames/hg-ansible-galaxy 
https://bitbucket.org/willthames/http-ansible-galaxy/get/master.tar.gz,,http-role # comment From 6d6d4f0c8e2b7d9a6883780d35cb56fedb8b2224 Mon Sep 17 00:00:00 2001 From: Arata Notsu <notsu@virtualtech.jp> Date: Tue, 1 Dec 2015 23:47:22 +0900 Subject: [PATCH 0014/1113] BOOLEAN should contain boolean literals It is natural that an argument_spec with choises=BOOLEAN accepts boolean literal (True, False) though the current implementation allows only string or int. --- lib/ansible/module_utils/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index d2cf09458ea..95857339539 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -34,8 +34,8 @@ ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>" MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>" MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>" -BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1] -BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0] +BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True] +BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False] BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>" From 7724c958e149e1b9dc1021936b31956792013e38 Mon Sep 17 00:00:00 2001 From: Ming Qian <bruceharbin@gmail.com> Date: Tue, 1 Dec 2015 11:24:17 -0800 Subject: [PATCH 0015/1113] Update intro_windows.rst first pull. thanks. --- docsite/rst/intro_windows.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index e5cbb94fafd..316d1eca1ac 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -166,6 +166,8 @@ In group_vars/windows.yml, define the following inventory variables:: ansible_port: 5986 ansible_connection: winrm +Attention for the older style variables (``ansible_ssh_*``): ansible_ssh_password doesn't exist, should be ansible_ssh_pass. + Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet <http://blogs.msdn.com/b/powershell/archive/2015/06/03/looking-forward-microsoft-support-for-secure-shell-ssh.aspx>`). If you have installed the ``kerberos`` module and ``ansible_user`` contains ``@`` (e.g. ``username@realm``), Ansible will first attempt Kerberos authentication. *This method uses the principal you are authenticated to Kerberos with on the control machine and not ``ansible_user``*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication. From 29f5c5db7178b3bb26f4dd8410269a44d17e5315 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 3 Dec 2015 12:50:23 -0500 Subject: [PATCH 0016/1113] bugfix for ios.py shared module argument creation This patch fixes a bug in module_utils/ios.py where the the wrong shared module arguments are being generated. This bug prevented the shared module from operating correctly. This patch should be generally applied. 
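For context, a module built on top of this shared code would be driven roughly as below; the argument spec and module body are assumptions for illustration only, not part of this patch:

    from ansible.module_utils.basic import *
    from ansible.module_utils.ios import *

    def main():
        # Hypothetical spec; ios_module() merges it with the IOS common arguments
        module = ios_module(argument_spec=dict(
            commands=dict(type='list', required=True),
        ))
        shell = ios_connection(module)
        responses = shell.send(module.params['commands'])
        module.exit_json(changed=False, stdout=responses)

    if __name__ == '__main__':
        main()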
--- lib/ansible/module_utils/ios.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index dc46a860c6a..085b68dcd28 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -80,7 +80,7 @@ def ios_module(**kwargs): """ spec = kwargs.get('argument_spec') or dict() - argument_spec = url_argument_spec() + argument_spec = shell_argument_spec() argument_spec.update(IOS_COMMON_ARGS) if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) @@ -150,21 +150,6 @@ class IosShell(object): responses.append(response) return responses -def ios_from_args(module): - """Extracts the set of argumetns to build a valid IOS connection - """ - params = dict() - for arg, attrs in IOS_COMMON_ARGS.iteritems(): - if module.params['device']: - params[arg] = module.params['device'].get(arg) - if arg not in params or module.params[arg]: - params[arg] = module.params[arg] - if params[arg] is None: - if attrs.get('required'): - module.fail_json(msg='argument %s is required' % arg) - params[arg] = attrs.get('default') - return params - def ios_connection(module): """Creates a connection to an IOS device based on the module arguments """ @@ -180,16 +165,16 @@ def ios_connection(module): shell = IosShell() shell.connect(host, port=port, username=username, password=password, timeout=timeout) + shell.send('terminal length 0') except paramiko.ssh_exception.AuthenticationException, exc: module.fail_json(msg=exc.message) except socket.error, exc: module.fail_json(msg=exc.strerror, errno=exc.errno) - shell.send('terminal length 0') - if module.params['enable_mode']: shell.authorize(module.params['enable_password']) return shell + From a1f516824ee2160121437edf6939ab2145972739 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Dec 2015 18:23:08 -0800 Subject: [PATCH 0017/1113] corrected playbook path, reformated options help the last just to make the help consistent and readable --- lib/ansible/cli/pull.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 04586c1d0c5..9cc6c25e9f2 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,8 +74,10 @@ class PullCLI(CLI): help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', help='directory to checkout repository to') - self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + help='directory to checkout repository to') + self.parser.add_option('-U', '--url', dest='url', default=None, + help='URL of the playbook repository') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -174,8 +176,7 @@ class PullCLI(CLI): display.display("Repository has not changed, quitting.") return 0 - playbook = self.select_playbook(path) - + playbook = self.select_playbook(self.options.dest) if playbook is None: raise AnsibleOptionsError("Could not find a playbook to run.") From 8d5f36a6c23ad17116ee0bb24c07f83745efb8e0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Dec 2015 19:39:57 -0800 Subject: [PATCH 0018/1113] return unique list of hosts --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index fdcbd37e78e..59a3c37bf93 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -196,7 +196,7 @@ class Inventory(object): hosts = [ h for h in hosts if h in self._restriction ] HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return hosts + return list(set(hosts)) @classmethod def split_host_pattern(cls, pattern): From e1c62fb5afd5344dc1f3ff1606803263218b79ea Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Dec 2015 19:42:05 -0800 Subject: [PATCH 0019/1113] reverted to previous pull checkout dir behaviour This fixes bugs with not finding plays when not specifying checkout dir Also makes it backwards compatible --- lib/ansible/cli/pull.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 9cc6c25e9f2..b2e402126da 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,7 +74,7 @@ class PullCLI(CLI): help='sleep for random interval (between 0 and n number of seconds) before starting. 
This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') @@ -90,6 +90,11 @@ class PullCLI(CLI): self.options, self.args = self.parser.parse_args() + if not self.options.dest: + hostname = socket.getfqdn() + # use a hostname dependent directory, in case of $HOME on nfs + self.options.dest = os.path.join('~/.ansible/pull', hostname) + if self.options.sleep: try: secs = random.randint(0,int(self.options.sleep)) From d5446f98046d379ec950b849317472982dcba757 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Dec 2015 20:47:02 -0800 Subject: [PATCH 0020/1113] fixed ansible-pull broken options * sudo was not working, now it supports full become * now default checkout dir works, not only when specifying * paths for checkout dir get expanded * fixed limit options for playbook * added verbose and debug info --- lib/ansible/cli/__init__.py | 12 +++++++----- lib/ansible/cli/pull.py | 25 ++++++++++++++++--------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da4d1b92d3d..da1aabcc698 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -210,7 +210,7 @@ class CLI(object): @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False): + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False): ''' create an options parser for most ansible scripts ''' # TODO: implement epilog parsing @@ -267,10 +267,6 @@ class CLI(object): if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', - help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, @@ -287,6 +283,12 @@ class CLI(object): help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) parser.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) + + if runas_opts or runas_prompt_opts: + parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', 
action='store_true', + help='ask for su password (deprecated, use become)') parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index b2e402126da..1543c704d57 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -64,10 +64,12 @@ class PullCLI(CLI): subset_opts=True, inventory_opts=True, module_opts=True, + runas_prompt_opts=True, ) # options unique to pull - self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run') + self.parser.add_option('--purge', default=False, action='store_true', + help='purge checkout after playbook run') self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', help='only run the playbook if the repository has been updated') self.parser.add_option('-s', '--sleep', dest='sleep', default=None, @@ -94,6 +96,7 @@ class PullCLI(CLI): hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs self.options.dest = os.path.join('~/.ansible/pull', hostname) + self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest)) if self.options.sleep: try: @@ -126,7 +129,7 @@ class PullCLI(CLI): node = platform.node() host = socket.getfqdn() limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]])) - base_opts = '-c local "%s"' % limit_opts + base_opts = '-c local ' if self.options.verbosity > 0: base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) @@ -137,7 +140,7 @@ class PullCLI(CLI): else: inv_opts = self.options.inventory - #TODO: enable more repo modules hg/svn? + #FIXME: enable more repo modules hg/svn? 
if self.options.module_name == 'git': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: @@ -157,8 +160,8 @@ class PullCLI(CLI): raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) - cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % ( - bin_path, inv_opts, base_opts, self.options.module_name, repo_opts + cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % ( + bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts ) for ev in self.options.extra_vars: @@ -170,6 +173,8 @@ class PullCLI(CLI): time.sleep(self.options.sleep) # RUN the Checkout command + display.debug("running ansible with VCS module to checkout repo") + display.vvvv('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if rc != 0: @@ -193,16 +198,18 @@ class PullCLI(CLI): cmd += ' -i "%s"' % self.options.inventory for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev - if self.options.ask_sudo_pass: - cmd += ' -K' + if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass: + cmd += ' --ask-become-pass' if self.options.tags: cmd += ' -t "%s"' % self.options.tags - if self.options.limit: - cmd += ' -l "%s"' % self.options.limit + if self.options.subset: + cmd += ' -l "%s"' % self.options.subset os.chdir(self.options.dest) # RUN THE PLAYBOOK COMMAND + display.debug("running ansible-playbook to do actual work") + display.debug('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if self.options.purge: From e385c91fa528cb5e835077331512307b231ba393 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 4 Dec 2015 09:57:06 -0800 Subject: [PATCH 0021/1113] Update submodule refs# --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cd9a7667aa3..191347676ee 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 +Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 3c4f954f0fe..a10bdd6be94 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 3c4f954f0fece5dcb3241d6d5391273334206241 +Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 From 750adbaa270bca5a63f443808a7b8ddc2a026d9a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 4 Dec 2015 12:48:56 -0500 Subject: [PATCH 0022/1113] Changing up how host (in)equality is checked Fixes #13397 --- lib/ansible/inventory/dir.py | 2 +- lib/ansible/inventory/host.py | 2 +- test/units/inventory/test_host.py | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index e4f7ee80f92..e716987fd5f 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -205,7 +205,7 @@ class InventoryDirectory(object): # because the __eq__/__ne__ methods in Host() compare the # name fields rather than references, we use id() here to # do the object comparison for merges - if id(self.hosts[host.name]) != id(host): + if self.hosts[host.name] != host: # different object, merge self._merge_hosts(self.hosts[host.name], host) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a561b951b45..a433463fa1b 
100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -38,7 +38,7 @@ class Host: def __eq__(self, other): if not isinstance(other, Host): return False - return self.name == other.name + return id(self) == id(other) def __ne__(self, other): return not self.__eq__(other) diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py index 078d4321b57..5c0945f7b4e 100644 --- a/test/units/inventory/test_host.py +++ b/test/units/inventory/test_host.py @@ -29,9 +29,7 @@ class TestHost(unittest.TestCase): def test_equality(self): self.assertEqual(self.hostA, self.hostA) self.assertNotEqual(self.hostA, self.hostB) - self.assertEqual(self.hostA, Host('a')) - # __ne__ is a separate method - self.assertFalse(self.hostA != Host('a')) + self.assertNotEqual(self.hostA, Host('a')) def test_hashability(self): # equality implies the hash values are the same From 84507aedd4b4a4be48acf9657b90bb341c3bd1e2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 4 Dec 2015 13:33:27 -0500 Subject: [PATCH 0023/1113] Adding a uuid field so we can track host equality across serialization too --- lib/ansible/inventory/host.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a433463fa1b..6263dcbc80d 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import uuid + from ansible.inventory.group import Group from ansible.utils.vars import combine_vars @@ -38,7 +40,7 @@ class Host: def __eq__(self, other): if not isinstance(other, Host): return False - return id(self) == id(other) + return self._uuid == other._uuid def __ne__(self, other): return not self.__eq__(other) @@ -55,6 +57,7 @@ class Host: name=self.name, vars=self.vars.copy(), address=self.address, + uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, ) @@ -65,6 +68,7 @@ class Host: self.name = data.get('name') self.vars = data.get('vars', dict()) self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -84,6 +88,7 @@ class Host: self.set_variable('ansible_port', int(port)) self._gathered_facts = False + self._uuid = uuid.uuid4() def __repr__(self): return self.get_name() From 0434644d12c64918d5182a7c0b0057687b1cdbc2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 4 Dec 2015 11:50:39 -0800 Subject: [PATCH 0024/1113] Transform exceptions into ansible messages via to_unicode instead of str to avoid tracebacks. 
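The failure mode this avoids is easy to reproduce under Python 2; a minimal sketch, with plain unicode() standing in for the to_unicode helper:

    # -*- coding: utf-8 -*-
    e = Exception(u"réponse invalide")
    print(u"The async task did not return valid JSON: %s" % unicode(e))
    # str(e) would try to ASCII-encode the message and raise UnicodeEncodeError,
    # turning the error into a traceback instead of a readable message.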
Fixes #13385 --- lib/ansible/executor/task_executor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4a7d7464ef8..5d7430fad25 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -146,7 +146,7 @@ class TaskExecutor: except AttributeError: pass except Exception as e: - display.debug("error closing connection: %s" % to_unicode(e)) + display.debug(u"error closing connection: %s" % to_unicode(e)) def _get_loop_items(self): ''' @@ -183,7 +183,7 @@ class TaskExecutor: loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: - if 'has no attribute' in str(e): + if u'has no attribute' in to_unicode(e): loop_terms = [] display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.") else: @@ -231,7 +231,7 @@ class TaskExecutor: tmp_task = self._task.copy() tmp_play_context = self._play_context.copy() except AnsibleParserError as e: - results.append(dict(failed=True, msg=str(e))) + results.append(dict(failed=True, msg=to_unicode(e))) continue # now we swap the internal task and play context with their copies, @@ -401,7 +401,7 @@ class TaskExecutor: try: result = self._handler.run(task_vars=variables) except AnsibleConnectionFailure as e: - return dict(unreachable=True, msg=str(e)) + return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") if self._task.async > 0: @@ -412,7 +412,7 @@ class TaskExecutor: return result result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: - return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e)) + return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) From e8954e556a6f36e0eaeb8160bc04171ed655c43f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CBrice?= <brice.kollee@gmail.com> Date: Fri, 4 Dec 2015 16:24:19 -0500 Subject: [PATCH 0025/1113] comment examples in default hosts file --- examples/hosts | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/hosts b/examples/hosts index ce4cbb7caa4..841f4bc6500 100644 --- a/examples/hosts +++ b/examples/hosts @@ -10,35 +10,35 @@ # Ex 1: Ungrouped hosts, specify before any group headers. 
-green.example.com -blue.example.com -192.168.100.1 -192.168.100.10 +## green.example.com +## blue.example.com +## 192.168.100.1 +## 192.168.100.10 # Ex 2: A collection of hosts belonging to the 'webservers' group -[webservers] -alpha.example.org -beta.example.org -192.168.1.100 -192.168.1.110 +## [webservers] +## alpha.example.org +## beta.example.org +## 192.168.1.100 +## 192.168.1.110 # If you have multiple hosts following a pattern you can specify # them like this: -www[001:006].example.com +## www[001:006].example.com # Ex 3: A collection of database servers in the 'dbservers' group -[dbservers] - -db01.intranet.mydomain.net -db02.intranet.mydomain.net -10.25.1.56 -10.25.1.57 +## [dbservers] +## +## db01.intranet.mydomain.net +## db02.intranet.mydomain.net +## 10.25.1.56 +## 10.25.1.57 # Here's another example of host ranges, this time there are no # leading 0s: -db-[99:101]-node.example.com +## db-[99:101]-node.example.com From 1eb0a1ddf7cf2f9501ea48915307652e8ab55049 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 4 Dec 2015 15:16:02 -0800 Subject: [PATCH 0026/1113] Correct VERSION in the devel branch --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 879b416e609..7ec1d6db408 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1 +2.1.0 From a96a879fcf8c80ee37ff3898f729d7baeac1cd6f Mon Sep 17 00:00:00 2001 From: sam-at-github <sgpinkus@gmail.com> Date: Sat, 5 Dec 2015 13:06:58 +1100 Subject: [PATCH 0027/1113] Add fullstop to make sentence make sense. Touch parargraph while at it. --- docsite/rst/playbooks_variables.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 18f1e57f728..307387a72e5 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -793,8 +793,8 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t .. rubric:: Footnotes -.. [1] Tasks in each role will see their own role's defaults tasks outside of roles will the last role's defaults -.. [2] Variables defined in inventory file or provided by dynamic inventory +.. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. +.. [2] Variables defined in inventory file or provided by dynamic inventory. .. note:: Within a any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. From fa71c38c2a7332ed450464e9239aac6e6698b095 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 5 Dec 2015 01:47:35 -0500 Subject: [PATCH 0028/1113] updated pull location in changelog it was in between of backslash description and example --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9f8b4b76a9..d246be10933 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,9 +37,9 @@ Ansible Changes By Release * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. `ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). +* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. 
* Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice. This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified: -* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. ``` # Syntax in 1.9.x From 0129fb0a44080d324d110c3d5c5223ab2aa138b2 Mon Sep 17 00:00:00 2001 From: Nils Steinger <git@n-st.de> Date: Sat, 5 Dec 2015 15:28:37 +0100 Subject: [PATCH 0029/1113] Remove duplicates from host list *before* caching it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ansible previously added hosts to the host list multiple times for commands like `ansible -i 'localhost,' -c local -m ping 'localhost,localhost' --list-hosts`. 8d5f36a fixed the obvious error, but still added the un-deduplicated list to a cache, so all future invocations of get_hosts() would retrieve a non-deduplicated list. This caused problems down the line: For some reason, Ansible only ever schedules "flush_handlers" tasks (instead of scheduling any actual tasks from the playbook) for hosts that are contained in the host lists multiple times. This probably happens because the host states are stored in a dictionary indexed by the hostnames, so duplicate hostname would cause the state to be overwritten by subsequent invocations of … something. --- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 59a3c37bf93..14cd169265b 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -195,8 +195,8 @@ class Inventory(object): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return list(set(hosts)) + HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod def split_host_pattern(cls, pattern): From a1f6d17e37b059aa9d34a004b0aed05a6b8fa3b3 Mon Sep 17 00:00:00 2001 From: Nils Steinger <git@n-st.de> Date: Sat, 5 Dec 2015 15:40:49 +0100 Subject: [PATCH 0030/1113] More meaningful string representation for meta tasks (like 'noop' and 'flush_handlers') --- lib/ansible/playbook/task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 4f326b628bc..21dbc87becf 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -133,7 +133,10 @@ class Task(Base, Conditional, Taggable, Become): def __repr__(self): ''' returns a human readable representation of the task ''' - return "TASK: %s" % self.get_name() + if self.get_name() == 'meta ': + return "TASK: meta (%s)" % self.args['_raw_params'] + else: + return "TASK: %s" % self.get_name() def _preprocess_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' From f89f906f87c2c4d850702404f70cfabaa63be351 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 5 Dec 2015 10:10:25 -0500 Subject: [PATCH 0031/1113] simplified get_hosts code to have 1 retrun point --- lib/ansible/inventory/__init__.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 14cd169265b..d7d0f03fb1f 100644 --- 
a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -178,24 +178,24 @@ class Inventory(object): if self._restriction: pattern_hash += u":%s" % to_unicode(self._restriction) - if pattern_hash in HOSTS_PATTERNS_CACHE: - return HOSTS_PATTERNS_CACHE[pattern_hash][:] + if pattern_hash not in HOSTS_PATTERNS_CACHE: - patterns = Inventory.split_host_pattern(pattern) - hosts = self._evaluate_patterns(patterns) + patterns = Inventory.split_host_pattern(pattern) + hosts = self._evaluate_patterns(patterns) - # mainly useful for hostvars[host] access - if not ignore_limits_and_restrictions: - # exclude hosts not in a subset, if defined - if self._subset: - subset = self._evaluate_patterns(self._subset) - hosts = [ h for h in hosts if h in subset ] + # mainly useful for hostvars[host] access + if not ignore_limits_and_restrictions: + # exclude hosts not in a subset, if defined + if self._subset: + subset = self._evaluate_patterns(self._subset) + hosts = [ h for h in hosts if h in subset ] - # exclude hosts mentioned in any restriction (ex: failed hosts) - if self._restriction is not None: - hosts = [ h for h in hosts if h in self._restriction ] + # exclude hosts mentioned in any restriction (ex: failed hosts) + if self._restriction is not None: + hosts = [ h for h in hosts if h in self._restriction ] + + HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod From 8ea45e8608fc15e07493b11ce28fe3d3f38865b8 Mon Sep 17 00:00:00 2001 From: Luca Berruti <nadirio@gmail.com> Date: Sat, 5 Dec 2015 19:43:02 +0100 Subject: [PATCH 0032/1113] Make no_target_syslog consistent. no_target_syslog = False --> do log on target --- examples/ansible.cfg | 2 +- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 2 +- lib/ansible/plugins/action/async.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 74aef7a0246..87c089f45ae 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -182,7 +182,7 @@ #no_log = False # prevents logging of tasks, but only on the targets, data is still logged on the master/controller -#no_target_syslog = True +#no_target_syslog = False # controls the compression level of variables sent to # worker processes. 
At the default of 0, no compression diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 08d522fcb60..6faae928dbe 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -159,7 +159,7 @@ DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', # disclosure DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True) -DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True) +DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 64a3b51e5d3..497143224a7 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -382,7 +382,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): module_args['_ansible_check_mode'] = True # set no log in the module arguments, if required - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # set debug in the module arguments, if required diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 51e2413af27..8a7175aeb86 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -48,7 +48,7 @@ class ActionModule(ActionBase): env_string = self._compute_environment_string() module_args = self._task.args.copy() - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # configure, upload, and chmod the target module From 955710267c1992c5e3b5b9eb77f4c76e289e3313 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 5 Dec 2015 15:59:51 -0500 Subject: [PATCH 0033/1113] only set become defaults at last possible moment tasks were overriding commandline with their defaults, not with the explicit setting, removed the setting of defaults from task init and pushed down to play context at last possible moment. 
fixes #13362 --- lib/ansible/playbook/become.py | 16 +++++++++------- lib/ansible/playbook/play_context.py | 3 +++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index 643f2b555d5..1e579751d46 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -90,16 +90,18 @@ class Become: display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") - # if we are becoming someone else, but some fields are unset, - # make sure they're initialized to the default config values - if ds.get('become', False): - if ds.get('become_method', None) is None: - ds['become_method'] = C.DEFAULT_BECOME_METHOD - if ds.get('become_user', None) is None: - ds['become_user'] = C.DEFAULT_BECOME_USER return ds + def set_become_defaults(self, become, become_method, become_user): + ''' if we are becoming someone else, but some fields are unset, + make sure they're initialized to the default config values ''' + if become: + if become_method is None: + become_method = C.DEFAULT_BECOME_METHOD + if become_user is None: + become_user = C.DEFAULT_BECOME_USER + def _get_attr_become(self): ''' Override for the 'become' getattr fetcher, used from Base. diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 5c020939808..9320a23ed9b 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -392,6 +392,9 @@ class PlayContext(Base): if new_info.no_log is None: new_info.no_log = C.DEFAULT_NO_LOG + # set become defaults if not previouslly set + task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user) + return new_info def make_become_cmd(self, cmd, executable=None): From 41773630edcf8ab138a36290c4904c6ba537390b Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 23 Nov 2015 22:01:27 -0500 Subject: [PATCH 0034/1113] adds new device argument to nxapi command arguments The device argument allows a dict of nxapi parameters to be passed to the module to simplify passing the nxapi parameters --- lib/ansible/module_utils/nxapi.py | 75 ++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/lib/ansible/module_utils/nxapi.py b/lib/ansible/module_utils/nxapi.py index 0589b9a50c3..35bcc442fbd 100644 --- a/lib/ansible/module_utils/nxapi.py +++ b/lib/ansible/module_utils/nxapi.py @@ -32,16 +32,16 @@ from ansible.module_utils.nxapi import * The nxapi module provides the following common argument spec: - * host (str) - [Required] The IPv4 address or FQDN of the network device + * host (str) - The IPv4 address or FQDN of the network device * port (str) - Overrides the default port to use for the HTTP/S connection. The default values are 80 for HTTP and 443 for HTTPS - * url_username (str) - [Required] The username to use to authenticate + * username (str) - The username to use to authenticate the HTTP/S connection. Aliases: username - * url_password (str) - [Required] The password to use to authenticate + * password (str) - The password to use to authenticate the HTTP/S connection. Aliases: password * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS) @@ -51,6 +51,10 @@ The nxapi module provides the following common argument spec: device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf` and `bash`. 
The default value is `cli_show_ascii` + * device (dict) - Used to send the entire set of connection parameters + as a dict object. This argument is mutually exclusive with the + host argument + In order to communicate with Cisco NXOS devices, the NXAPI feature must be enabled and configured on the device. @@ -58,34 +62,52 @@ must be enabled and configured on the device. NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] -def nxapi_argument_spec(spec=None): - """Creates an argument spec for working with NXAPI - """ - arg_spec = url_argument_spec() - arg_spec.update(dict( - host=dict(required=True), - port=dict(), - url_username=dict(required=True, aliases=['username']), - url_password=dict(required=True, aliases=['password']), - use_ssl=dict(default=False, type='bool'), - command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) - )) - if spec: - arg_spec.update(spec) - return arg_spec +NXAPI_COMMON_ARGS = dict( + host=dict(), + port=dict(), + username=dict(), + password=dict(), + use_ssl=dict(default=False, type='bool'), + device=dict(), + command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) +) -def nxapi_url(module): +def nxapi_module(**kwargs): + """Append the common args to the argument_spec + """ + spec = kwargs.get('argument_spec') or dict() + + argument_spec = url_argument_spec() + argument_spec.update(NXAPI_COMMON_ARGS) + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + + module = AnsibleModule(**kwargs) + + device = module.params.get('device') or dict() + for key, value in device.iteritems(): + if key in NXAPI_COMMON_ARGS: + module.params[key] = value + + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + if key != 'device': + module.params[key] = value + + return module + +def nxapi_url(params): """Constructs a valid NXAPI url """ - if module.params['use_ssl']: + if params['use_ssl']: proto = 'https' else: proto = 'http' - host = module.params['host'] + host = params['host'] url = '{}://{}'.format(proto, host) - port = module.params['port'] - if module.params['port']: - url = '{}:{}'.format(url, module.params['port']) + if params['port']: + url = '{}:{}'.format(url, params['port']) url = '{}/ins'.format(url) return url @@ -109,7 +131,7 @@ def nxapi_body(commands, command_type, **kwargs): def nxapi_command(module, commands, command_type=None, **kwargs): """Sends the list of commands to the device over NXAPI """ - url = nxapi_url(module) + url = nxapi_url(module.params) command_type = command_type or module.params['command_type'] @@ -118,6 +140,9 @@ def nxapi_command(module, commands, command_type=None, **kwargs): headers = {'Content-Type': 'text/json'} + module.params['url_username'] = module.params['username'] + module.params['url_password'] = module.params['password'] + response, headers = fetch_url(module, url, data=data, headers=headers, method='POST') From a8e015cc22d248e965157605e30b810de280b0a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 6 Dec 2015 22:12:48 -0800 Subject: [PATCH 0035/1113] Add representers so we can output yaml for all the types we read in from yaml --- lib/ansible/parsing/yaml/dumper.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py index a51289b09b9..a8a5015b8ea 100644 --- a/lib/ansible/parsing/yaml/dumper.py +++ 
b/lib/ansible/parsing/yaml/dumper.py @@ -22,7 +22,7 @@ __metaclass__ = type import yaml from ansible.compat.six import PY3 -from ansible.parsing.yaml.objects import AnsibleUnicode +from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping from ansible.vars.hostvars import HostVars class AnsibleDumper(yaml.SafeDumper): @@ -50,3 +50,13 @@ AnsibleDumper.add_representer( represent_hostvars, ) +AnsibleDumper.add_representer( + AnsibleSequence, + yaml.representer.SafeRepresenter.represent_list, +) + +AnsibleDumper.add_representer( + AnsibleMapping, + yaml.representer.SafeRepresenter.represent_dict, +) + From 4d637e5780503448840a3e4ef824b8f72aa5112a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 6 Dec 2015 22:16:31 -0800 Subject: [PATCH 0036/1113] Use self.args when we parse arguments that way the arguments can be constructed manually --- lib/ansible/cli/adhoc.py | 2 +- lib/ansible/cli/doc.py | 2 +- lib/ansible/cli/galaxy.py | 2 +- lib/ansible/cli/playbook.py | 2 +- lib/ansible/cli/pull.py | 2 +- lib/ansible/cli/vault.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 25f29fc2976..120b2302112 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -70,7 +70,7 @@ class AdHocCLI(CLI): help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if len(self.args) != 1: raise AnsibleOptionsError("Missing target hosts") diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 4eef1dd5dd6..a17164eb50e 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -62,7 +62,7 @@ class DocCLI(CLI): self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity def run(self): diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 31c21146fc1..94c04614ace 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -113,7 +113,7 @@ class GalaxyCLI(CLI): help='Force overwriting an existing role') # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() + self.options, self.args =self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index fc81f964563..a9c0ed018dc 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -72,7 +72,7 @@ class PlaybookCLI(CLI): parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") - self.options, self.args = parser.parse_args() + self.options, self.args = parser.parse_args(self.args[1:]) self.parser = parser diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 1543c704d57..593d601e8d4 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -90,7 +90,7 @@ class PullCLI(CLI): help='verify GPG signature of checked out commit, if it fails abort running the playbook.' 
' This needs the corresponding VCS module to support such an operation') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if not self.options.dest: hostname = socket.getfqdn() diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index ac148d4770c..9908f17e578 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -69,7 +69,7 @@ class VaultCLI(CLI): elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity can_output = ['encrypt', 'decrypt'] From 2c8eee956fb574ab0ef2ae362a2936f95a2d80cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Mon, 7 Dec 2015 09:25:37 +0100 Subject: [PATCH 0037/1113] Fix issue when var name is the same as content. See https://github.com/ansible/ansible/issues/13453 for more details. --- lib/ansible/plugins/action/debug.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a024e28b01d..1d8e28c7a4a 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -45,8 +45,12 @@ class ActionModule(ActionBase): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: + # If var name is same as result, try to template it if results == self._task.args['var']: - results = "VARIABLE IS NOT DEFINED!" + try: + results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) + except: + results = "VARIABLE IS NOT DEFINED!" result[self._task.args['var']] = results else: result['msg'] = 'here we are' From dcedfbe26c2aacc901fe5ef84b51103feb92990f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Dec 2015 09:54:55 -0800 Subject: [PATCH 0038/1113] corrected usage of ec2.py's profile option this was never introduced into ansible-playbook though the docs stated otherwise. We still explain how to use the env var to get the same result. --- docsite/rst/intro_dynamic_inventory.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 1a2bd6f72c3..5f491ebc2ef 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -111,9 +111,8 @@ If you use boto profiles to manage multiple AWS accounts, you can pass ``--profi aws_access_key_id = <prod access key> aws_secret_access_key = <prod secret key> -You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, or run playbooks with: ``ansible-playbook -i 'ec2.py --profile prod' myplaybook.yml``. - -Alternatively, use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml`` +You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, this option is not supported by ``anisble-playbook`` though. +But you can use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml`` Since each region requires its own API call, if you are only using a small set of regions, feel free to edit ``ec2.ini`` and list only the regions you are interested in. 
There are other config options in ``ec2.ini`` including cache control, and destination variables. From 97626475db9fab72c27a7904d8e745638a6dde1f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Dec 2015 10:04:48 -0800 Subject: [PATCH 0039/1113] added new ec2_vpc_net_facts to 2.1 changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d246be10933..36886531bb5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Ansible Changes By Release ## 2.1 TBD - ACTIVE DEVELOPMENT ####New Modules: +* aws: ec2_vpc_net_facts * cloudstack: cs_volume ####New Filters: From 9ae1dede0387c02b0f3772f168e94c99ce9f23a8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 06:36:04 -0800 Subject: [PATCH 0040/1113] adhoc does not load plugins by default reimplemented feature from 1.x which kept additional callbacks from poluting adhoc unless specifically asked for through configuration. --- lib/ansible/cli/adhoc.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 120b2302112..912b07a5c72 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,6 +163,9 @@ class AdHocCLI(CLI): else: cb = 'minimal' + if not C.DEFAULT_LOAD_CALLBACK_PLUGINS: + C.DEFAULT_CALLBACK_WHITELIST = [] + if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree From 8d500215b68aafe49c0416867af3fc701addf602 Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Thu, 12 Nov 2015 16:15:42 -0500 Subject: [PATCH 0041/1113] trigger jenkins integration tests --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index cec8ccca971..2e1f15559d3 100644 --- a/README.md +++ b/README.md @@ -55,3 +55,4 @@ Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.de Ansible is sponsored by [Ansible, Inc](http://ansible.com) + From 970d7cadb7f50e5f55b3aa1c12af130957f67204 Mon Sep 17 00:00:00 2001 From: David L Ballenger <dlb@davidlballenger.com> Date: Tue, 8 Dec 2015 07:11:02 -0800 Subject: [PATCH 0042/1113] Add ssh_host support for MacOSX El Capitan. OS X El Capitan moved the /etc/ssh_* files into /etc/ssh/. This fix adds a distribution version check for Darwin to set the keydir appropriately on El Capitan and later. 
--- lib/ansible/module_utils/facts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4120a51fb5b..94a5a11f726 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -524,7 +524,10 @@ class Facts(object): keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519') if self.facts['system'] == 'Darwin': - keydir = '/etc' + if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') : + keydir = '/etc/ssh' + else: + keydir = '/etc' else: keydir = '/etc/ssh' From 9c4eae525306bf201304a15d36f531b0308cd25e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Dec 2015 11:55:35 -0500 Subject: [PATCH 0043/1113] Fix always_run support in the action plugin for template when copying Fixes #13418 --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0b..5edc4e8a2c4 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ class ActionModule(ActionBase): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode: # do actual work thorugh copy + if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 5cac8efd73ff39268d2bebc1f501e3ae662add9d Mon Sep 17 00:00:00 2001 From: Jeremy Audet <ichimonji10@gmail.com> Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 0044/1113] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f077..24f9fc9a647 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ import traceback try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ class SphinxBuilder(object): """ Run the DocCommand. """ - print "Creating html documentation ..." + print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ class SphinxBuilder(object): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... 
(%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." - print " Run 'make viewdocs' to build and then preview in a web browser." + print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ if __name__ == '__main__': if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." + print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9ab..4c94ca3f2c4 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From d4ccb0be59c86d8518ba4becaed5c7442d8758fc Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 09:20:49 -0800 Subject: [PATCH 0045/1113] have always_run override check mode for a task Fixes #13418 --- lib/ansible/playbook/play_context.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 9320a23ed9b..81223500adf 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -395,6 +395,10 @@ class PlayContext(Base): # set become defaults if not previouslly set task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user) + # have always_run override check mode + if task.always_run: + new_info.check_mode = False + return new_info def make_become_cmd(self, cmd, executable=None): From 7ffd578a9d38b80e71ef6df2219f7e887e2909b7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 09:24:20 -0800 Subject: [PATCH 0046/1113] Revert "Fix always_run support in the action plugin for template when copying" This reverts commit 9c4eae525306bf201304a15d36f531b0308cd25e. 
--- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 5edc4e8a2c4..109f3e80c0b 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ class ActionModule(ActionBase): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy + if not self._play_context.check_mode: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 05c8bb79f8158ca8a93d50bc798dd1bed02aaa89 Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Tue, 8 Dec 2015 12:24:42 -0500 Subject: [PATCH 0047/1113] playbook that Ansible jenkins runs moved into core The playbook is already running in jenkins and works. This moves the assets into core for ease of maintenance going forward. --- .../ansible.cfg | 2 + .../ec2.yml | 41 ++++++++++ .../inventory | 1 + .../inventory.dynamic | 3 + .../main.yml | 62 ++++++++++++++ .../roles/ansible_deps/.gitignore | 1 + .../roles/ansible_deps/.travis.yml | 37 +++++++++ .../roles/ansible_deps/README.md | 8 ++ .../roles/ansible_deps/defaults/main.yml | 2 + .../roles/ansible_deps/handlers/main.yml | 2 + .../ansible_deps/meta/.galaxy_install_info | 1 + .../roles/ansible_deps/meta/main.yml | 23 ++++++ .../roles/ansible_deps/tasks/main.yml | 81 +++++++++++++++++++ .../roles/ansible_deps/test/inventory | 1 + .../roles/ansible_deps/test/main.yml | 29 +++++++ .../roles/ansible_deps/test/requirements.yml | 2 + .../roles/ansible_deps/vars/main.yml | 2 + .../roles/run_integration/tasks/main.yml | 20 +++++ 18 files changed, 318 insertions(+) create mode 100644 test/utils/ansible-playbook_integration_runner/ansible.cfg create mode 100644 test/utils/ansible-playbook_integration_runner/ec2.yml create mode 100644 test/utils/ansible-playbook_integration_runner/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic create mode 100644 test/utils/ansible-playbook_integration_runner/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml create mode 
100644 test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/ansible.cfg b/test/utils/ansible-playbook_integration_runner/ansible.cfg new file mode 100644 index 00000000000..14c80651521 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +host_key_checking = False diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml new file mode 100644 index 00000000000..59e15f0da1a --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -0,0 +1,41 @@ +- name: Launch Instance + ec2: + group_id: 'sg-07bb906d' # jenkins-slave_new + count: 1 + instance_type: 'm3.medium' + image: '{{ item.image }}' + wait: true + region: 'us-east-1' + keypair: '{{ keypair }}' + aws_access_key: "{{ aws_access_key|default(lookup('env', 'AWS_ACCESS_KEY')) }}" + aws_secret_key: "{{ aws_secret_key|default(lookup('env', 'AWS_SECRET_KEY')) }}" + instance_tags: + jenkins: jenkins_ansible_pr_test + register: ec2 + with_items: slaves +# We could do an async here, that would speed things up + + +- name: Wait for SSH + wait_for: + host: "{{ item['instances'][0]['public_ip'] }}" + port: 22 + delay: 10 + timeout: 320 + state: started + with_items: ec2.results + +- name: Wait a little longer for centos + pause: seconds=20 + +- name: Add hosts group temporary inventory group with pem path + add_host: + name: "{{ item.1.platform }} {{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + groups: dynamic_hosts + ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + ansible_ssh_private_key_file: '{{ pem_path }}' + ansible_ssh_user: "{{ item.1.ssh_user }}" + ec2_vars: "{{ ec2.results[item.0]['instances'][0] }}" + ec2_instance_ids: "{{ ec2.results[item.0]['instance_ids'] }}" + with_indexed_items: slaves + diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory new file mode 100644 index 00000000000..42de3a1b5d7 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic new file mode 100644 index 00000000000..1aa03b4ed8d --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory.dynamic @@ -0,0 +1,3 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +[dynamic_hosts] +54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml new file mode 100644 index 00000000000..8661a6dba9e --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -0,0 +1,62 @@ +- hosts: all + connection: local + vars: + slaves: + - distribution: "Ubuntu" + version: "12.04" + image: "ami-2ccc7a44" + ssh_user: "ubuntu" + platform: "ubuntu-12.04-x86_64" + - distribution: "Ubuntu" + version: "14.04" + image: "ami-9a562df2" + ssh_user: "ubuntu" + platform: "ubuntu-14.04-x86_64" + - distribution: "CentOS" + version: "6.5" + image: "ami-8997afe0" + ssh_user: "root" + platform: "centos-6.5-x86_64" + - distribution: "CentOS" + version: "7" + 
image: "ami-96a818fe" + ssh_user: "centos" + platform: "centos-7-x86_64" + + tasks: + - debug: var=ansible_version + - include: ec2.yml + when: groups['dynamic_hosts'] is not defined + +- hosts: dynamic_hosts + sudo: true + vars: + credentials_file: '' + test_flags: "" + make_target: "non_destructive" + #pre_tasks: + roles: + - { role: ansible_deps, tags: ansible_deps } + - { role: run_integration, + tags: run_integration, + run_integration_test_flags: "{{ test_flags }}", + run_integration_credentials_file: "{{ credentials_file }}", + run_integration_make_target: "{{ make_target }}", } + tasks: + + - name: Kill ec2 instances + sudo: false + local_action: + module: ec2 + state: absent + region: 'us-east-1' + instance_ids: "{{ hostvars[item]['ec2_instance_ids'] }}" + when: hostvars[item]['ec2_instance_ids'] is defined and item == inventory_hostname + with_items: groups['dynamic_hosts'] + + - set_fact: + ansible_connection: local + + - name: Fail + shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' + when: "test_results.rc != 0" diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore new file mode 100644 index 00000000000..1377554ebea --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml new file mode 100644 index 00000000000..2264f0b20a7 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml @@ -0,0 +1,37 @@ +sudo: required +dist: trusty +language: python +python: + - "2.7" +services: + - docker +env: + global: + - PATH="/usr/bin:$PATH" + +before_install: + # Ansible doesn't play well with virtualenv + - deactivate + - sudo apt-get update -qq + - sudo apt-get install docker-engine + +install: + - sudo pip install docker-py + # software-properties-common for ubuntu 14.04 + # python-software-properties for ubuntu 12.04 + - sudo apt-get install -y sshpass software-properties-common python-software-properties + - sudo apt-add-repository -y ppa:ansible/ansible + - sudo apt-get update -qq + - sudo apt-get install -y ansible + - sudo rm /usr/bin/python && sudo ln -s /usr/bin/python2.7 /usr/bin/python + - ansible-galaxy install -r test/requirements.yml -p test/roles/ + +script: + # Ensure any invocation of ansible-playbook (i.e. sudo) results in host_key_checking disabled + - sudo ansible all -i "127.0.0.1," -m lineinfile -a "regexp=^#host_key_checking dest=/etc/ansible/ansible.cfg line='host_key_checking = False'" -c local + - ansible-playbook -i test/inventory test/main.yml --syntax-check + - sudo ansible-playbook -i test/inventory test/main.yml + +notifications: + # notify ansible galaxy of results + webhooks: http://goo.gl/nSuq9h diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md new file mode 100644 index 00000000000..f0fc755863c --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md @@ -0,0 +1,8 @@ +[](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) + +ansible_deps +========= + +Install needed packages to run ansible integration tests. 
+ +This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml new file mode 100644 index 00000000000..c7837fc56b1 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml new file mode 100644 index 00000000000..050cdd12342 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info new file mode 100644 index 00000000000..ffc298fff6f --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Tue Dec 8 15:06:28 2015', version: master} diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml new file mode 100644 index 00000000000..07c15d619ee --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml @@ -0,0 +1,23 @@ +--- +galaxy_info: + author: Chris Meyers + description: install ansible integration test dependencies + company: Ansible + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - precise + - trusty + galaxy_tags: + - testing + - integration + - ansible + - dependencies +dependencies: [] + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml new file mode 100644 index 00000000000..f71128921d9 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml @@ -0,0 +1,81 @@ +--- + +- name: Install sudo + yum: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'RedHat' + +- name: Install sudo + apt: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'Debian' + +- name: Install RH epel + yum: name="epel-release" state=installed + sudo: true + when: ansible_os_family == 'RedHat' + +- name: Install RH ansible dependencies + yum: name="{{ item }}" state=installed + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - rubygems + - unzip + - openssl + - make + - gcc + - python-devel + - libselinux-python + when: ansible_os_family == 'RedHat' + +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + +- name: Install Debian ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - unzip + - python-dev + when: ansible_os_family == 'Debian' + +- name: Install ubuntu 12.04 ansible dependencies + apt: name="{{ item }}" 
state=installed update_cache=yes + sudo: true + with_items: + - rubygems + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + +- name: Install ubuntu 14.04 ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - rubygems-integration + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" + +- name: Install ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - PyYAML + - Jinja2 + - paramiko + +- name: Remove tty sudo requirement + sudo: true + lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" + when: ansible_os_family == 'RedHat' + + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory new file mode 100644 index 00000000000..2302edae31b --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml new file mode 100644 index 00000000000..95617dbfac3 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml @@ -0,0 +1,29 @@ +--- +- name: Bring up docker containers + hosts: localhost + gather_facts: false + vars: + inventory: + - name: ansible_deps_host_1 + image: "chrismeyers/centos6" + - name: ansible_deps_host_2 + image: "chrismeyers/ubuntu12.04" + - name: ansible_deps_host_3 + image: "ubuntu-upstart:14.04" + roles: + - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } + +- name: Run ansible_deps Tests + hosts: docker_containers + vars: + git_dir: "/tmp/ansible" + roles: + - { role: ansible_deps } + tasks: + - name: Clone ansible + git: + repo: "https://github.com/ansible/ansible.git" + dest: "{{ git_dir }}" + - name: Invoke ansible in hacking mode + shell: "cd {{ git_dir }} && . hacking/env-setup && ansible --version && ansible-playbook --version" + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml new file mode 100644 index 00000000000..fa10641a72e --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml @@ -0,0 +1,2 @@ +- src: chrismeyersfsu.provision_docker + name: provision_docker diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml new file mode 100644 index 00000000000..a38c5fb0425 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for . 
diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml new file mode 100644 index 00000000000..2114567d152 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Sync ansible repo to ec2 instance + synchronize: + src: "{{ sync_dir }}/" + dest: "~/ansible" + +- name: Get ansible source dir + sudo: false + shell: "cd ~ && pwd" + register: results + +- shell: ". hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + sudo: true + environment: + TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" + CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" + args: + chdir: "{{ results.stdout }}/ansible" + register: test_results + ignore_errors: true From 822624d061c55c5386e260b67d923627df3394fd Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Tue, 8 Dec 2015 14:05:57 -0500 Subject: [PATCH 0048/1113] rename role ansible_deps to ansible_test_deps --- .../roles/{ansible_deps => ansible_test_deps}/.gitignore | 0 .../roles/{ansible_deps => ansible_test_deps}/.travis.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/README.md | 0 .../roles/{ansible_deps => ansible_test_deps}/defaults/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/handlers/main.yml | 0 .../{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info | 0 .../roles/{ansible_deps => ansible_test_deps}/meta/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/tasks/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/test/inventory | 0 .../roles/{ansible_deps => ansible_test_deps}/test/main.yml | 0 .../{ansible_deps => ansible_test_deps}/test/requirements.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/vars/main.yml | 0 12 files changed, 0 insertions(+), 0 deletions(-) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.gitignore (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.travis.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/README.md (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/defaults/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/handlers/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/tasks/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/inventory (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/requirements.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/vars/main.yml (100%) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore 
b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml From de690445bca1f47e773e43b6cd6f1ed0b2ec278b Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Dec 2015 14:00:17 -0500 Subject: [PATCH 0049/1113] Make fact delegating configurable, defaulting to 1.x behavior --- lib/ansible/playbook/block.py | 1 + lib/ansible/playbook/role/__init__.py | 1 + lib/ansible/playbook/role/include.py | 3 ++- lib/ansible/playbook/task.py | 1 + lib/ansible/plugins/strategy/__init__.py | 2 +- 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 0de5e635e7e..e842883bc82 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,6 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 3cb914689fe..bd7760d221c 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,6 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 67949e2e124..6e89eb33343 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -40,7 +40,8 @@ class RoleInclude(RoleDefinition): is included for execution in a play. 
""" - _delegate_to = FieldAttribute(isa='string') + _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 21dbc87becf..6c7730cb2a5 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,6 +72,7 @@ class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 0d0cc4a9dce..732a9293d28 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -289,7 +289,7 @@ class StrategyBase: # find the host we're actually refering too here, which may # be a host that is not really in inventory at all - if task.delegate_to is not None: + if task.delegate_to is not None and task.delegate_facts: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: From 398f6bbb89ebdcd3ef0efdbc26d54801a0eb2e55 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Dec 2015 14:34:37 -0500 Subject: [PATCH 0050/1113] Fix typo from 5ae850c --- lib/ansible/playbook/block.py | 2 +- lib/ansible/playbook/role/__init__.py | 2 +- lib/ansible/playbook/role/include.py | 2 +- lib/ansible/playbook/task.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index e842883bc82..f2d9c82833a 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,7 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) # for future consideration? 
this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index bd7760d221c..1c6b344a4fc 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,7 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 6e89eb33343..43e2d9e4fc1 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -41,7 +41,7 @@ class RoleInclude(RoleDefinition): """ _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 6c7730cb2a5..17f1952e39c 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,7 +72,7 @@ class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) From ec5827c22a1f238591c4c21413bf690ceb83aa1f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 11:52:59 -0800 Subject: [PATCH 0051/1113] updated with delegate_facts directive --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36886531bb5..3d31ef4ebb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,7 @@ Ansible Changes By Release by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Added `meta: refresh_inventory` to force rereading the inventory in a play. This re-executes inventory scripts, but does not force them to ignore any cache they might use. -* Now when you delegate an action that returns ansible_facts, these facts will be applied to the delegated host, unlike before when they were applied to the current host. +* New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. * local connections now work with 'su' as a privilege escalation method * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. 
From 795fac917ea5970fd9583a41dad7a6d33a626b75 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 11:59:04 -0800 Subject: [PATCH 0052/1113] fixed typo in tree callback, added default dir this would allow it to work with playbooks also --- lib/ansible/plugins/callback/tree.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index 8b1118864ec..b6ecd6de878 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -41,7 +41,8 @@ class CallbackModule(CallbackBase): self.tree = TREE_DIR if not self.tree: - self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree) + self.tree = os.path.expanduser("~/.ansible/tree") + self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' @@ -53,7 +54,7 @@ class CallbackModule(CallbackBase): with open(path, 'wb+') as fd: fd.write(buf) except (OSError, IOError) as e: - self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e))) + self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e))) def result_to_tree(self, result): if self.tree: From 1799de8528926355f51f79f705a6927a05ba018a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Dec 2015 15:02:25 -0500 Subject: [PATCH 0053/1113] Preserve original token when appending to _raw_params in parse_kv Fixes #13311 --- lib/ansible/parsing/splitter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index c506603acb5..f24d8ecf9de 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -65,8 +65,8 @@ def parse_kv(args, check_raw=False): raise raw_params = [] - for x in vargs: - x = _decode_escapes(x) + for orig_x in vargs: + x = _decode_escapes(orig_x) if "=" in x: pos = 0 try: @@ -90,7 +90,7 @@ def parse_kv(args, check_raw=False): else: options[k.strip()] = unquote(v.strip()) else: - raw_params.append(x) + raw_params.append(orig_x) # recombine the free-form params, if any were found, and assign # them to a special option for use later by the shell/command module From 0e55398e16de1ca99dbe2115a4809c57cdbb5150 Mon Sep 17 00:00:00 2001 From: Jeremy Audet <ichimonji10@gmail.com> Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 0054/1113] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f077..24f9fc9a647 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ import traceback try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ class SphinxBuilder(object): """ Run the DocCommand. """ - print "Creating html documentation ..." + print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ class SphinxBuilder(object): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... (%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." - print " Run 'make viewdocs' to build and then preview in a web browser." + print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ if __name__ == '__main__': if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." + print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9ab..4c94ca3f2c4 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From 021605a19578309cccc5cdec8c47c512b819d7e0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 0055/1113] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928dbe..0f809db7297 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # 
non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bddd..8ce2358eb1e 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ class Templar: self.block_end = self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ class Templar: if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ class Templar: if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ class Templar: result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From d82d65ee7bd2506e06ffb225a2e9be6fa1ac36db Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 0056/1113] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928dbe..0f809db7297 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bddd..8ce2358eb1e 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ class Templar: self.block_end = 
self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ class Templar: if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ class Templar: if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ class Templar: result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From c1cec64aa8372f2e7d565a2717c68a075836ae9b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Dec 2015 14:18:11 -0800 Subject: [PATCH 0057/1113] added delegate_facts docs --- docsite/rst/playbooks_delegation.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4411e4aa29f..4e2e8c372ac 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -130,6 +130,29 @@ Here is an example:: Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync will need to ask for a passphrase. +.. _delegate_facts: + +Delegated facts +``````````````` + +.. versionadded:: 2.0 + +Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). +The new directive `delegate_facts` if set to `True` will assing the task's gathered facts to the delegated host instead of the current one.:: + + + - hosts: app_servers + tasks: + - name: gather facts from db servers + setup: + delegate_to: "{{item}}" + delegate_facts: True + with_items: "{{groups['dbservers'}}" + +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, +that way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. + + .. 
_run_once: Run Once From ea72fd65474d52936523c9cb3c12c3827f8438f1 Mon Sep 17 00:00:00 2001 From: = <jhawkesworth@users.noreply.github.com> Date: Wed, 9 Dec 2015 08:57:06 +0000 Subject: [PATCH 0058/1113] adding integration tests for win_regmerge module (extras) --- .../test_win_regmerge/files/settings1.reg | Bin 0 -> 374 bytes .../test_win_regmerge/files/settings2.reg | Bin 0 -> 760 bytes .../test_win_regmerge/files/settings3.reg | Bin 0 -> 1926 bytes .../roles/test_win_regmerge/meta/main.yml | 3 + .../roles/test_win_regmerge/tasks/main.yml | 133 ++++++++++++++++++ .../templates/win_line_ending.j2 | 4 + .../roles/test_win_regmerge/vars/main.yml | 1 + test/integration/test_winrm.yml | 1 + 8 files changed, 142 insertions(+) create mode 100644 test/integration/roles/test_win_regmerge/files/settings1.reg create mode 100644 test/integration/roles/test_win_regmerge/files/settings2.reg create mode 100644 test/integration/roles/test_win_regmerge/files/settings3.reg create mode 100644 test/integration/roles/test_win_regmerge/meta/main.yml create mode 100644 test/integration/roles/test_win_regmerge/tasks/main.yml create mode 100644 test/integration/roles/test_win_regmerge/templates/win_line_ending.j2 create mode 100644 test/integration/roles/test_win_regmerge/vars/main.yml diff --git a/test/integration/roles/test_win_regmerge/files/settings1.reg b/test/integration/roles/test_win_regmerge/files/settings1.reg new file mode 100644 index 0000000000000000000000000000000000000000..baec75b2af0ee7f806f7e51fc362da820d60e4f6 GIT binary patch literal 374 zcmZXQO-sX25Jk^g@IQpCbR*(N5y6E-gF&lOt3j-kQj3JvL{d@v=hc(Zojiu&e!O{i z-uG8YMa>fpA1p~2FymQn$r~*znN!tD)QA)A)LYd`T#NVFV%xLMTGRt)oO|b<F&A8s zacm#cw`Xbvm#QOf)N+QD@`GT?$k1btU3PRezB|*I1)oH3d6ntQG?8H{Dx6<2*E^K` z)!s#gnI9ul>MLFPxxU)%PVB9Y>EBi>QjV;QL+6dSR&DgPOn7m}T>nCU_dgqaazKyG XaQ@HMrJF?ZTeIfQSp;gspGKY^4^lt~ literal 0 HcmV?d00001 diff --git a/test/integration/roles/test_win_regmerge/files/settings2.reg b/test/integration/roles/test_win_regmerge/files/settings2.reg new file mode 100644 index 0000000000000000000000000000000000000000..fc2612cb8a8a3937b72e36400b434bac8753e02f GIT binary patch literal 760 zcma))TT4Pw5QW!s(0@4iEaB}a1U<yDD9Q}2Br3%4c!Bi9(Qbdf`ewEU3PNo5W!B7I zvu5`G`R-||xrWO0p@kxKlxhsN&{Da+Ku+*Zu`=vPcldL>Q|vi*tOJ!8``YvQpfg=? z=U#nXs;xxF?0vP^6MW~o!uzN$zEj<(2i|d{=Njs&sj3Q58F^D86UQUpbG?EzJ@*o} zf!>KRX4Buop6khV6o^VS0(sL5>O(}Aimb9!GZl2C38zmTqQ9;pv&`Bcdy$#?YUj15 z?om}(&0k`TQETTa>$(funXl21qrv*OnKNXt%q8b6>)APtEp{yd*~NTIhpb1Nyg^^Q zQW@AyW$Y>|t4srshSyW`Of|<tA9m$y(qpS)$h%Ga-;BMY;U8|D+#L2VwmZ_~?ucQ^ kQ(kPzTdkR`L%aB_=Yh|D>4KM?vA1Dpp)32BqOvpm0(f<CxBvhE literal 0 HcmV?d00001 diff --git a/test/integration/roles/test_win_regmerge/files/settings3.reg b/test/integration/roles/test_win_regmerge/files/settings3.reg new file mode 100644 index 0000000000000000000000000000000000000000..fbe7411c95efab7b1763849940f2f6fd1f27b4a6 GIT binary patch literal 1926 zcmdUw-A`IU5XH~4N&g3OpG{M(T5Hk=3ss0kj9`iJHiilnqTFjP;Kx6&{$}=G!H-zp zwAn11yR)-%X3p8+?zXLg5_Og8N+XrDubxiX8)>XeH|+etI^ZkiyQE*(Gpqx?6TVmV zQJ)wewBYkdRqfz&sAK(9LuGBU9;?n;##YlV)>ut!TWYDz{=V<D9mASd^jXWCN{QR! 
zWZCyizGk{$r_N4FY+I8DY`;Bzr0>Mo_P8zLR*6&R{z%`LWo_Ueb7nx6C+d*bSAD_q z_V{C}(V?<u%#e#=db@hg;)&tPG|_jA2^foN8#_Z-F(!J<G1aelwYw<FG}EQ#rjXfT znMH8i!MEq9jkhT$1tZZDy#*EnTM^31;INP1<-;w7tp*m;a7X8iiZdM|^*MW;V_I}n z^ZW+Hdqgx{PKZ0saX+oh6v2p`CZK3?Hl;=lFrD+JEQ^0vrw-<8lzYmsDo?z+ulug% zZ(!E9f@kLWhd2BB4NLhA1?CpjHI6KP$oM7XOT5{2MyOKFbGANliT6p)%}ahk6Gcq( ztB|X`*d|`{`iJgguiLaZBv<QbkF~N3{-OT(tquLnd&p=`F02~XD~wHJv5D%e9-(h0 ve#=x>%X+RTtBlR}`kq6d4H^p_{{8o$SO+j*{}T3(^grOOqR`a4)hyn7xKt{f literal 0 HcmV?d00001 diff --git a/test/integration/roles/test_win_regmerge/meta/main.yml b/test/integration/roles/test_win_regmerge/meta/main.yml new file mode 100644 index 00000000000..55200b3fc64 --- /dev/null +++ b/test/integration/roles/test_win_regmerge/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_win_tests + diff --git a/test/integration/roles/test_win_regmerge/tasks/main.yml b/test/integration/roles/test_win_regmerge/tasks/main.yml new file mode 100644 index 00000000000..6e64c9dd4a7 --- /dev/null +++ b/test/integration/roles/test_win_regmerge/tasks/main.yml @@ -0,0 +1,133 @@ +# test code for the win_regmerge module +# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com> + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +# clear the area of the registry we are using for tests +- name: remove setting + win_regedit: + key: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp' + state: absent + +# copy over some registry files to work with +- name: copy over some registry files to work with + win_copy: src={{item}} dest={{win_output_dir}}\\{{item}} + with_items: + - settings1.reg + - settings2.reg + - settings3.reg + +# test 1 - basic test of changed behaviour +# merge in REG_SZ +- name: test 1 merge in a setting + win_regmerge: + path: "{{win_output_dir}}\\settings1.reg" + register: merge11_result + +- assert: + that: + - "merge11_result.changed == true" + +# re run the merge +- name: test 1 merge in the setting again + win_regmerge: + path: "{{win_output_dir}}\\settings1.reg" + register: merge12_result + +# without a compare to key, should allways report changed +- assert: + that: + - "merge12_result.changed == true" +# assert changed false + +# prune reg key +- name: test 1 remove setting + win_regedit: + key: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp' + state: absent + +# +# test 2, observe behaviour when compare_to param is set +# +- name: test 2 merge in a setting + win_regmerge: + path: "{{win_output_dir}}\\settings1.reg" + compare_to: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp\Moosic\ILikeToMooveIt' + register: merge21_result + +- assert: + that: + - "merge21_result.changed == true" + +# re run the merge +- name: test 2 merge in the setting again but with compare_key + win_regmerge: + path: "{{win_output_dir}}\\settings1.reg" + compare_to: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp\Moosic\ILikeToMooveIt' + register: merge22_result + +# with a compare to key, should now report not changed +- assert: + that: + - "merge22_result.changed == false" +# assert changed false + +# prune the contents of the registry from the parent of the compare key downwards +- name: test 2 clean up remove setting + win_regedit: + key: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp' + state: absent + +# test 3 merge in more complex settings +- name: test 3 merge in a setting + win_regmerge: + path: "{{win_output_dir}}\\settings3.reg" + compare_to: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp\Moo Monitor' + register: merge31_result + +- assert: + that: + - "merge31_result.changed == true" + +# re run the merge +- name: test 3 merge in the setting again but with compare_key check + win_regmerge: + path: "{{win_output_dir}}\\settings3.reg" + compare_to: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp\Moo Monitor' + register: merge32_result + +# with a compare to key, should now report not changed +- assert: + that: + - "merge32_result.changed == false" +# assert changed false + +# prune the contents of the registry from the compare key downwards +- name: test 3 clean up remove setting + win_regedit: + key: 'HKLM:\SOFTWARE\Wow6432Node\Cow Corp' + state: absent + +# clean up registry files + +- name: clean up registry files + win_file: path={{win_output_dir}}\\{{item}} state=absent + with_items: + - settings1.reg + - settings2.reg + - settings3.reg + +# END OF win_regmerge tests diff --git a/test/integration/roles/test_win_regmerge/templates/win_line_ending.j2 b/test/integration/roles/test_win_regmerge/templates/win_line_ending.j2 new file mode 100644 index 00000000000..d0cefd76f49 --- /dev/null +++ b/test/integration/roles/test_win_regmerge/templates/win_line_ending.j2 @@ -0,0 +1,4 @@ +#jinja2: newline_sequence:'\r\n' +{{ templated_var }} +{{ templated_var }} +{{ templated_var }} diff --git a/test/integration/roles/test_win_regmerge/vars/main.yml 
b/test/integration/roles/test_win_regmerge/vars/main.yml new file mode 100644 index 00000000000..1e8f64ccf44 --- /dev/null +++ b/test/integration/roles/test_win_regmerge/vars/main.yml @@ -0,0 +1 @@ +templated_var: templated_var_loaded diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index f11171faf8c..51a5daa51fb 100644 --- a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -37,4 +37,5 @@ - { role: test_win_copy, tags: test_win_copy } - { role: test_win_template, tags: test_win_template } - { role: test_win_lineinfile, tags: test_win_lineinfile } + - { role: test_win_regmerge, tags: test_win_regmerge } From 57391f49ba5e7692e50e4e43ed9c541511eb0936 Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Wed, 9 Dec 2015 07:52:43 -0500 Subject: [PATCH 0059/1113] removed ansible_python_interpreter * added missed renames of ansible_deps to ansible_test_deps * removed acidential inventory.dynamic file * modified README for ansible_test_deps role --- .../ansible-playbook_integration_runner/inventory | 2 +- .../inventory.dynamic | 3 --- .../utils/ansible-playbook_integration_runner/main.yml | 2 +- .../roles/ansible_test_deps/README.md | 6 ++---- .../roles/ansible_test_deps/test/main.yml | 10 +++++----- 5 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory index 42de3a1b5d7..2302edae31b 100644 --- a/test/utils/ansible-playbook_integration_runner/inventory +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -1 +1 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +localhost ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic deleted file mode 100644 index 1aa03b4ed8d..00000000000 --- a/test/utils/ansible-playbook_integration_runner/inventory.dynamic +++ /dev/null @@ -1,3 +0,0 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" -[dynamic_hosts] -54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8661a6dba9e..5d15541490f 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -36,7 +36,7 @@ make_target: "non_destructive" #pre_tasks: roles: - - { role: ansible_deps, tags: ansible_deps } + - { role: ansible_test_deps, tags: ansible_test_deps } - { role: run_integration, tags: run_integration, run_integration_test_flags: "{{ test_flags }}", diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md index f0fc755863c..09ffacacaf5 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md @@ -1,8 +1,6 @@ -[](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) +[](https://travis-ci.org/chrismeyersfsu/ansible_test_deps) -ansible_deps +ansible_test_deps ========= Install needed packages to run ansible integration 
tests. - -This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml index 95617dbfac3..b66d699d5d6 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml @@ -4,21 +4,21 @@ gather_facts: false vars: inventory: - - name: ansible_deps_host_1 + - name: ansible_test_deps_host_1 image: "chrismeyers/centos6" - - name: ansible_deps_host_2 + - name: ansible_test_deps_host_2 image: "chrismeyers/ubuntu12.04" - - name: ansible_deps_host_3 + - name: ansible_test_deps_host_3 image: "ubuntu-upstart:14.04" roles: - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } -- name: Run ansible_deps Tests +- name: Run ansible_test_deps Tests hosts: docker_containers vars: git_dir: "/tmp/ansible" roles: - - { role: ansible_deps } + - { role: ansible_test_deps } tasks: - name: Clone ansible git: From f16628ffecfa5ece0535c9b1c3de78cc78e18575 Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Wed, 9 Dec 2015 09:37:39 -0500 Subject: [PATCH 0060/1113] symbolic link role for testing --- .../roles/ansible_test_deps/test/roles/ansible_test_deps | 1 + 1 file changed, 1 insertion(+) create mode 120000 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps new file mode 120000 index 00000000000..eb6d9edda4b --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps @@ -0,0 +1 @@ +../../../ansible_test_deps \ No newline at end of file From 8d66dcda21f176ee7cce21e99f52dea384ef42b8 Mon Sep 17 00:00:00 2001 From: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Wed, 9 Dec 2015 09:39:45 -0500 Subject: [PATCH 0061/1113] remove .gitignore --- .../roles/ansible_test_deps/.gitignore | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore deleted file mode 100644 index 1377554ebea..00000000000 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp From 0719eb3e2d798c6f80223e37dd77bc0ac41c537d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 06:32:04 -0800 Subject: [PATCH 0062/1113] clarified warning from tree callback --- lib/ansible/plugins/callback/tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index b6ecd6de878..ee710a6dfdf 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -42,7 +42,7 @@ class CallbackModule(CallbackBase): self.tree = TREE_DIR if not self.tree: self.tree = 
os.path.expanduser("~/.ansible/tree") - self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) + self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' From 87969868d42cd8aba1c65c8207d059d73407373b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 07:21:00 -0800 Subject: [PATCH 0063/1113] avoid persistent containers in attribute defaults moved from the field attribute declaration and created a placeholder which then is resolved in the field attribute class. this is to avoid unwanted persistent of the defaults across objects which introduces stealth bugs when multiple objects of the same kind are used in succession while not overriding the default values. --- lib/ansible/playbook/attribute.py | 11 +++++++++++ lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 29 insertions(+), 18 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1e..ce7ed6d8fe7 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,6 +32,17 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate + # This is here to avoid `default=<container>` unwanted persistence across object instances + # We cannot rely on None as some fields use it to skip the code + # that would detect an empty container as a user error + if self.default == '_ansible_container': + if self.isa == 'list': + self.default = [] + elif self.isa == 'dict': + self.default = {} + elif self.isa == 'set': + self.default = set() + def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index f2d9c82833a..66009b028af 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ from ansible.playbook.taggable import Taggable class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default=[]) - _rescue = FieldAttribute(isa='list', default=[]) - _always = FieldAttribute(isa='list', default=[]) + _block = FieldAttribute(isa='list', default='_ansible_container') + _rescue = FieldAttribute(isa='list', default='_ansible_container') + _always = FieldAttribute(isa='list', default='_ansible_container') _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1d..a5b3ca725f8 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. 
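The placeholder handling added to Attribute.__init__ above replaces class-level declarations such as ``default=[]``, which the commit message blames for defaults persisting across objects. A minimal illustration of that general pitfall, using simplified stand-ins rather than the real playbook classes::

    class FieldAttr:
        def __init__(self, default=None):
            self.default = default

    class Thing:
        tags_attr = FieldAttr(default=[])         # evaluated once, at class definition
        def __init__(self):
            self.tags = Thing.tags_attr.default   # every instance receives the same list

    a, b = Thing(), Thing()
    a.tags.append('web')
    print(b.tags)                                 # ['web'] -- b was never touched

    # The patch swaps such literals for the '_ansible_container' sentinel, which
    # Attribute.__init__ turns into a fresh [], {} or set() when it is resolved.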
''' - _when = FieldAttribute(isa='list', default=[]) + _when = FieldAttribute(isa='list', default='_ansible_container') def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e951..e08c8c60016 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default=[], priority=99) - _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) + _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) _vault_password = FieldAttribute(isa='string', always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default=[], priority=90) + _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default=[]) - _pre_tasks = FieldAttribute(isa='list', default=[]) - _post_tasks = FieldAttribute(isa='list', default=[]) - _tasks = FieldAttribute(isa='list', default=[]) + _handlers = FieldAttribute(isa='list', default='_ansible_container') + _pre_tasks = FieldAttribute(isa='list', default='_ansible_container') + _post_tasks = FieldAttribute(isa='list', default='_ansible_container') + _tasks = FieldAttribute(isa='list', default='_ansible_container') # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500adf..da291c3c834 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default=set()) - _skip_tags = FieldAttribute(isa='set', default=set()) + _only_tags = FieldAttribute(isa='set', default='_ansible_container') + _skip_tags = FieldAttribute(isa='set', default='_ansible_container') _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index d9af2ba5237..52081c41539 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', default=dict()) + _vars = FieldAttribute(isa='dict', default='_ansible_container') @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 58b59145a1c..4bb7d0ce02b 100644 --- 
a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default=[]) + _dependencies = FieldAttribute(isa='list', default='_ansible_container') _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 8f5cfa09344..37e3261e80d 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ from ansible.template import Templar class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39c..53a9a3c3931 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default=dict()) + _args = FieldAttribute(isa='dict', default='_ansible_container') _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 4f84769a17bb92894ee31b08267cf9aec1c0118c Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 10:51:12 -0500 Subject: [PATCH 0064/1113] Galaxy 2.0 --- docsite/rst/galaxy.rst | 291 ++++++++++++++++- lib/ansible/cli/galaxy.py | 322 +++++++++++++++++-- lib/ansible/constants.py | 3 +- lib/ansible/galaxy/__init__.py | 2 + lib/ansible/galaxy/api.py | 205 ++++++++---- lib/ansible/galaxy/data/metadata_template.j2 | 14 + lib/ansible/galaxy/data/test_playbook.j2 | 5 + lib/ansible/galaxy/data/travis.j2 | 29 ++ lib/ansible/galaxy/login.py | 113 +++++++ lib/ansible/galaxy/role.py | 10 +- lib/ansible/galaxy/token.py | 67 ++++ 11 files changed, 949 insertions(+), 112 deletions(-) create mode 100644 lib/ansible/galaxy/data/test_playbook.j2 create mode 100644 lib/ansible/galaxy/data/travis.j2 create mode 100644 lib/ansible/galaxy/login.py create mode 100644 lib/ansible/galaxy/token.py diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 1b9475c418d..783ac15e456 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -8,7 +8,7 @@ Ansible Galaxy The Website ``````````` -The website `Ansible Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. +The website `Ansible Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. @@ -24,7 +24,7 @@ Installing Roles The most obvious is downloading roles from the Ansible Galaxy website:: - ansible-galaxy install username.rolename + $ ansible-galaxy install username.rolename .. 
_galaxy_cli_roles_path: @@ -33,23 +33,16 @@ roles_path You can specify a particular directory where you want the downloaded roles to be placed:: - ansible-galaxy install username.role -p ~/Code/ansible_roles/ + $ ansible-galaxy install username.role -p ~/Code/ansible_roles/ This can be useful if you have a master folder that contains ansible galaxy roles shared across several projects. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured). -Building out Role Scaffolding ------------------------------ - -It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: - - ansible-galaxy init rolename - Installing Multiple Roles From A File -------------------------------------- +===================================== To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: - ansible-galaxy install -r requirements.txt + $ ansible-galaxy install -r requirements.txt Where the requirements.txt looks like:: @@ -64,7 +57,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file Available versions will be listed on the Ansible Galaxy webpage for that role. Advanced Control over Role Requirements Files ---------------------------------------------- +============================================= For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: @@ -121,3 +114,275 @@ Roles pulled from galaxy work as with other SCM sourced roles above. To download `irc.freenode.net <http://irc.freenode.net>`_ #ansible IRC chat channel +Building Role Scaffolding +------------------------- + +Use the init command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: + + $ ansible-galaxy init rolename + +The above will create the following directory structure in the current working directory: + +:: + + README.md + .travsis.yml + defaults/ + main.yml + files/ + handlers/ + main.yml + meta/ + main.yml + templates/ + tests/ + inventory + test.yml + vars/ + main.yml + +.. note:: + + .travis.yml and tests/ are new in Ansible 2.0 + +If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error use the --force option. Force will create the above subdirectories and files, replacing anything that matches. + +Search for Roles +---------------- + +The search command provides for querying the Galaxy database, allowing for searching by tags, platforms, author and multiple keywords. For example: + +:: + + $ ansible-galaxy search elasticsearch --author geerlingguy + +The search command will return a list of the first 1000 results matching your search: + +:: + + Found 2 roles matching your search: + + Name Description + ---- ----------- + geerlingguy.elasticsearch Elasticsearch for Linux. + geerlingguy.elasticsearch-curator Elasticsearch curator for Linux. + +.. note:: + + The format of results pictured here is new in Ansible 2.0. 
+ +Get More Information About a Role +--------------------------------- + +Use the info command To view more detail about a specific role: + +:: + + $ ansible-galaxy info username.role_name + +This returns everything found in Galaxy for the role: + +:: + + Role: username.rolename + description: Installs and configures a thing, a distributed, highly available NoSQL thing. + active: True + commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57 + commit_message: Adding travis + commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab + company: My Company, Inc. + created: 2015-12-08T14:17:52.773Z + download_count: 1 + forks_count: 0 + github_branch: + github_repo: repo_name + github_user: username + id: 6381 + is_valid: True + issue_tracker_url: + license: Apache + min_ansible_version: 1.4 + modified: 2015-12-08T18:43:49.085Z + namespace: username + open_issues_count: 0 + path: /Users/username/projects/roles + scm: None + src: username.repo_name + stargazers_count: 0 + travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master + version: + watchers_count: 1 + +.. note:: + + The format of results pictured here is new in Ansible 2.0. + + +List Installed Roles +-------------------- + +The list command shows the name and version of each role installed in roles_path. + +:: + + $ ansible-galaxy list + + - chouseknecht.role-install_mongod, master + - chouseknecht.test-role-1, v1.0.2 + - chrismeyersfsu.role-iptables, master + - chrismeyersfsu.role-required_vars, master + +Remove an Installed Role +------------------------ + +The remove command will delete a role from roles_path: + +:: + + $ ansible-galaxy remove username.rolename + +Authenticate with Galaxy +------------------------ + +To use the import, delete and setup commands authentication with Galaxy is required. The login command will authenticate the user,retrieve a token from Galaxy, and store it in the user's home directory. + +:: + + $ ansible-galaxy login + + We need your Github login to identify you. + This information will not be sent to Galaxy, only to api.github.com. + The password will not be displayed. + + Use --github-token if you do not want to enter your password. + + Github Username: dsmith + Password for dsmith: + Succesfully logged into Galaxy as dsmith + +As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +Import a Role +------------- + +Roles can be imported using ansible-galaxy. The import command expects that the user previously authenticated with Galaxy using the login command. 
+ +Import any GitHub repo you have access to: + +:: + + $ ansible-galaxy import github_user github_repo + +By default the command will wait for the role to be imported by Galaxy, displaying the results as the import progresses: + +:: + + Successfully submitted import request 41 + Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref= + Retrieving Github repo githubuser/ansible-role-repo + Accessing branch: master + Parsing and validating meta/main.yml + Parsing galaxy_tags + Parsing platforms + Adding dependencies + Parsing and validating README.md + Adding repo tags as role versions + Import completed + Status SUCCESS : warnings=0 errors=0 + +Use the --branch option to import a specific branch. If not specified, the default branch for the repo will be used. + +If the --no-wait option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site under My Imports. + +.. note:: + + The import command is only available in Ansible 2.0. + +Delete a Role +------------- + +Remove a role from the Galaxy web site using the delete command. You can delete any role that you have access to in GitHub. The delete command expects that the user previously authenticated with Galaxy using the login command. + +:: + + ansible-galaxy delete github_user github_repo + +This only removes the role from Galaxy. It does not impact the actual GitHub repo. + +.. note:: + + The delete command is only available in Ansible 2.0. + +Setup Travis Integerations +-------------------------- + +Using the setup command you can enable notifications from `travis <http://travis-ci.org>`_. The setup command expects that the user previously authenticated with Galaxy using the login command. + +:: + + $ ansible-galaxy setup travis github_user github_repo xxxtravistokenxxx + + Added integration for travis chouseknecht/ansible-role-sendmail + +The setup command requires your Travis token. The Travis token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. + +The setup command enables Galaxy to respond to notifications. Follow the `Travis getting started guide <https://docs.travis-ci.com/user/getting-started/>`_ to enable the Travis build process for the role repository. + +When you create your .travis.yml file add the following to cause Travis to notify Galaxy when a build completes: + +:: + + notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ + +.. note:: + + The setup command is only available in Ansible 2.0. + + +List Travis Integrtions +======================= + +Use the --list option to display your Travis integrations: + +:: + + $ ansible-galaxy setup --list + + + ID Source Repo + ---------- ---------- ---------- + 2 travis github_user/github_repo + 1 travis github_user/github_repo + + +Remove Travis Integrations +========================== + +Use the --remove option to disable a Travis integration: + +:: + + $ ansible-galaxy setup --remove ID + +Provide the ID of the integration you want disabled. Use the --list option to get the ID. 
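The setup documentation above states that Galaxy stores only a hash computed from the GitHub user, the repository and the Travis token, and uses it to verify incoming notifications. A minimal sketch of that kind of check, assuming the SHA-256 scheme described in the linked Travis notification docs (the function names here are illustrative, not Galaxy's actual code)::

    import hashlib

    def travis_auth_hash(github_user, github_repo, travis_token):
        # Assumption: Travis sends sha256("<user>/<repo>" + token) as the
        # Authorization value of its webhook notifications.
        payload = '%s/%s%s' % (github_user, github_repo, travis_token)
        return hashlib.sha256(payload.encode('utf-8')).hexdigest()

    def notification_is_valid(auth_header, github_user, github_repo, travis_token):
        return auth_header == travis_auth_hash(github_user, github_repo, travis_token)

Because only the hash is kept, Galaxy can confirm that a notification belongs to a Travis build of that repository without holding the Travis token itself.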
+ + + + + + + + + + + + + + + + + + diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 94c04614ace..01e0475b24b 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -22,10 +22,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import os import os.path import sys import yaml +import json +import time from collections import defaultdict from jinja2 import Environment @@ -36,7 +37,10 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole +from ansible.galaxy.login import GalaxyLogin +from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement +from ansible.module_utils.urls import open_url try: from __main__ import display @@ -44,18 +48,52 @@ except ImportError: from ansible.utils.display import Display display = Display() - class GalaxyCLI(CLI): - VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search") + available_commands = { + "delete": "remove a role from Galaxy", + "import": "add a role contained in a GitHub repo to Galaxy", + "info": "display details about a particular role", + "init": "create a role directory structure in your roles path", + "install": "download a role into your roles path", + "list": "enumerate roles found in your roles path", + "login": "authenticate with Galaxy API and store the token", + "remove": "delete a role from your roles path", + "search": "query the Galaxy API", + "setup": "add a TravisCI integration to Galaxy", + } + SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + def __init__(self, args): - + self.VALID_ACTIONS = self.available_commands.keys() + self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) + def set_action(self): + """ + Get the action the user wants to execute from the sys argv list. + """ + for i in range(0,len(self.args)): + arg = self.args[i] + if arg in self.VALID_ACTIONS: + self.action = arg + del self.args[i] + break + + if not self.action: + self.show_available_actions() + + def show_available_actions(self): + # list available commands + display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") + display.display(u'\n' + "availabe commands:" + u'\n\n') + for key in self.VALID_ACTIONS: + display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) + display.display(' ') + def parse(self): ''' create an options parser for bin/ansible ''' @@ -63,11 +101,21 @@ class GalaxyCLI(CLI): usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions - if self.action == "info": + if self.action == "delete": + self.parser.set_usage("usage: %prog delete [options] github_user github_repo") + elif self.action == "import": + self.parser.set_usage("usage: %prog import [options] github_user github_repo") + self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + help='Don\'t wait for import results.') + self.parser.add_option('-b', '--branch', dest='reference', + help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') + self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + help='Check the status of the most recent import request for given github_user/github_repo.') + elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") @@ -83,27 +131,40 @@ class GalaxyCLI(CLI): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") + elif self.action == "login": + self.parser.set_usage("usage: %prog login [options]") + self.parser.add_option('-g','--github-token', dest='token', default=None, + help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by') - self.parser.set_usage("usage: %prog search [<search_term>] [--galaxy-tags <galaxy_tag1,galaxy_tag2>] [--platforms platform]") + self.parser.add_option('--author', dest='author', + help='GitHub username') + self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") + elif self.action == "setup": + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + + u'\n\n' + "Create an integration with travis.") + self.parser.add_option('-r', '--remove', dest='remove_id', default=None, + help='Remove the integration matching the provided ID value. Use --list to see ID values.') + self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + help='List all of your integrations.') # options that apply to more than one action - if self.action != "init": + if not self.action in ("config","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' 'ansible.cfg file (/etc/ansible/roles if not configured)') - if self.action in ("info","init","install","search"): - self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com", + if self.action in ("import","info","init","install","login","search","setup","delete"): + self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, help='Ignore SSL certificate validation errors.') @@ -112,23 +173,25 @@ class GalaxyCLI(CLI): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args(self.args[1:]) - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + if self.action: + # get options, args and galaxy object + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): + if not self.action: + return True + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api - if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline): - api_server = self.options.api_server - self.api = GalaxyAPI(self.galaxy, api_server) - if not self.api: - raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server) + if self.action in ("import","info","install","search","login","setup","delete") or \ + (self.action == 'init' and not self.options.offline): + self.api = GalaxyAPI(self.galaxy) self.execute() @@ -188,7 +251,7 @@ class GalaxyCLI(CLI): "however it will reset any main.yml files that may have\n" "been modified there already." 
% role_path) - # create the default README.md + # create default README.md if not os.path.exists(role_path): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") @@ -196,9 +259,16 @@ class GalaxyCLI(CLI): f.write(self.galaxy.default_readme) f.close() + # create default .travis.yml + travis = Environment().from_string(self.galaxy.default_travis).render() + f = open(os.path.join(role_path, '.travis.yml'), 'w') + f.write(travis) + f.close() + for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') + # create the directory if it doesn't exist already if not os.path.exists(dir_path): os.makedirs(dir_path) @@ -234,6 +304,20 @@ class GalaxyCLI(CLI): f.write(rendered_meta) f.close() pass + elif dir == "tests": + # create tests/test.yml + inject = dict( + role_name = role_name + ) + playbook = Environment().from_string(self.galaxy.default_test).render(inject) + f = open(os.path.join(dir_path, 'test.yml'), 'w') + f.write(playbook) + f.close() + + # create tests/inventory + f = open(os.path.join(dir_path, 'inventory'), 'w') + f.write('localhost') + f.close() elif dir not in ('files','templates'): # just write a (mostly) empty YAML file for main.yml f = open(main_yml_path, 'w') @@ -325,7 +409,7 @@ class GalaxyCLI(CLI): for role in required_roles: role = RoleRequirement.role_yaml_parse(role) - display.debug('found role %s in yaml file' % str(role)) + display.vvv('found role %s in yaml file' % str(role)) if 'name' not in role and 'scm' not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) @@ -348,7 +432,7 @@ class GalaxyCLI(CLI): roles_left.append(GalaxyRole(self.galaxy, rname.strip())) for role in roles_left: - display.debug('Installing role %s ' % role.name) + display.vvv('Installing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None and not force: @@ -458,21 +542,189 @@ class GalaxyCLI(CLI): return 0 def execute_search(self): - + page_size = 1000 search = None - if len(self.args) > 1: - raise AnsibleOptionsError("At most a single search term is allowed.") - elif len(self.args) == 1: - search = self.args.pop() + + if len(self.args): + terms = [] + for i in range(len(self.args)): + terms.append(self.args.pop()) + search = '+'.join(terms) - response = self.api.search_roles(search, self.options.platforms, self.options.tags) + if not search and not self.options.platforms and not self.options.tags and not self.options.author: + raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") - if 'count' in response: - display.display("Found %d roles matching your search:\n" % response['count']) + response = self.api.search_roles(search, platforms=self.options.platforms, + tags=self.options.tags, author=self.options.author, page_size=page_size) + + if response['count'] == 0: + display.display("No roles match your search.", color="yellow") + return True data = '' - if 'results' in response: - for role in response['results']: - data += self._display_role_info(role) + if response['count'] > page_size: + data += ("Found %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + else: + data += ("Found %d roles matching your search:\n" % response['count']) + + max_len = [] + for role in response['results']: + max_len.append(len(role['username'] + '.' 
+ role['name'])) + name_len = max(max_len) + format_str = " %%-%ds %%s\n" % name_len + data +='\n' + data += (format_str % ("Name", "Description")) + data += (format_str % ("----", "-----------")) + for role in response['results']: + data += (format_str % (role['username'] + '.' + role['name'],role['description'])) + self.pager(data) + + return True + + def execute_login(self): + """ + Verify user's identify via Github and retreive an auth token from Galaxy. + """ + # Authenticate with github and retrieve a token + if self.options.token is None: + login = GalaxyLogin(self.galaxy) + github_token = login.create_github_token() + else: + github_token = self.options.token + + galaxy_response = self.api.authenticate(github_token) + + if self.options.token is None: + # Remove the token we created + login.remove_github_token() + + # Store the Galaxy token + token = GalaxyToken() + token.set(galaxy_response['token']) + + display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username']) + return 0 + + def execute_import(self): + """ + Import a role into Galaxy + """ + + colors = { + 'INFO': 'normal', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'SUCCESS': 'green', + 'FAILED': 'red' + } + + if len(self.args) < 2: + raise AnsibleError("Expected a github_username and github_repository. Use --help.") + + github_repo = self.args.pop() + github_user = self.args.pop() + + if self.options.check_status: + task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) + else: + # Submit an import request + task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) + + if len(task) > 1: + # found multiple roles associated with github_user/github_repo + display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user,github_repo), + color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + for t in task: + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), + color='yellow') + return 0 + # found a single role as expected + display.display("Successfully submitted import request %d" % task[0]['id']) + if not self.options.wait: + display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) + display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo'])) + + if self.options.check_status or self.options.wait: + # Get the status of the import + msg_list = [] + finished = False + while not finished: + task = self.api.get_import_task(task_id=task[0]['id']) + for msg in task[0]['summary_fields']['task_messages']: + if msg['id'] not in msg_list: + display.display(msg['message_text'], color=colors[msg['message_type']]) + msg_list.append(msg['id']) + if task[0]['state'] in ['SUCCESS', 'FAILED']: + finished = True + else: + time.sleep(10) + + return 0 + + def execute_setup(self): + """ + Setup an integration from Github or Travis + """ + + if self.options.setup_list: + # List existing integration secrets + secrets = self.api.list_secrets() + if len(secrets) == 0: + # None found + display.display("No integrations found.") + return 0 + display.display(u'\n' + "ID Source Repo", color="green") + display.display("---------- ---------- ----------", color="green") + for secret in secrets: + display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], + secret['github_repo']),color="green") + return 0 + + if self.options.remove_id: + # Remove a secret + self.api.remove_secret(self.options.remove_id) + display.display("Secret removed. Integrations using this secret will not longer work.", color="green") + return 0 + + if len(self.args) < 4: + raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") + return 0 + + secret = self.args.pop() + github_repo = self.args.pop() + github_user = self.args.pop() + source = self.args.pop() + + resp = self.api.add_secret(source, github_user, github_repo, secret) + display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) + + return 0 + + def execute_delete(self): + """ + Delete a role from galaxy.ansible.com + """ + + if len(self.args) < 2: + raise AnsibleError("Missing one or more arguments. 
Expected: github_user github_repo") + + github_repo = self.args.pop() + github_user = self.args.pop() + resp = self.api.delete_role(github_user, github_repo) + + if len(resp['deleted_roles']) > 1: + display.display("Deleted the following roles:") + display.display("ID User Name") + display.display("------ --------------- ----------") + for role in resp['deleted_roles']: + display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) + + display.display(resp['status']) + + return True + + diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 0f809db7297..ae10c5e9a42 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -255,7 +255,8 @@ ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_k PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related -DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') +GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com') +GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True) # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py index 00d8c25aecf..62823fced47 100644 --- a/lib/ansible/galaxy/__init__.py +++ b/lib/ansible/galaxy/__init__.py @@ -52,6 +52,8 @@ class Galaxy(object): #TODO: move to getter for lazy loading self.default_readme = self._str_from_data_file('readme') self.default_meta = self._str_from_data_file('metadata_template.j2') + self.default_test = self._str_from_data_file('test_playbook.j2') + self.default_travis = self._str_from_data_file('travis.j2') def add_role(self, role): self.roles[role.name] = role diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index 2918688406f..c1bf2c4ed50 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -25,11 +25,15 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json +import urllib + from urllib2 import quote as urlquote, HTTPError from urlparse import urlparse +import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url +from ansible.galaxy.token import GalaxyToken try: from __main__ import display @@ -43,45 +47,113 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] - def __init__(self, galaxy, api_server): + def __init__(self, galaxy): self.galaxy = galaxy + self.token = GalaxyToken() + self._api_server = C.GALAXY_SERVER + self._validate_certs = C.GALAXY_IGNORE_CERTS - try: - urlparse(api_server, scheme='https') - except: - raise AnsibleError("Invalid server API url passed: %s" % api_server) + # set validate_certs + if galaxy.options.validate_certs == False: + self._validate_certs = False + display.vvv('Check for valid certs: %s' % self._validate_certs) - server_version = self.get_server_api_version('%s/api/' % (api_server)) - if not server_version: - raise AnsibleError("Could not retrieve server API version: %s" % api_server) + # set the API server + if galaxy.options.api_server != C.GALAXY_SERVER: + self._api_server = galaxy.options.api_server + display.vvv("Connecting to galaxy_server: %s" % self._api_server) + server_version = self.get_server_api_version() + 
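For orientation, the version probe above amounts to fetching the API root and reading the version it advertises. A rough standalone equivalent, assuming the default public server and using the Python 2 stdlib in place of ansible.module_utils.urls.open_url:

    import json
    import urllib2

    # The Galaxy API root advertises the protocol version it speaks,
    # e.g. {"current_version": "v1"}; anything outside SUPPORTED_VERSIONS
    # is rejected by the check that follows.
    data = json.load(urllib2.urlopen('https://galaxy.ansible.com/api/'))
    print(data['current_version'])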
if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (api_server, server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) self.version = server_version # for future use - display.vvvvv("Base API: %s" % self.baseurl) + display.vvv("Base API: %s" % self.baseurl) else: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) - def get_server_api_version(self, api_server): + def __auth_header(self): + token = self.token.get() + if token is None: + raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.") + return {'Authorization': 'Token ' + token} + + def __call_galaxy(self, url, args=None, headers=None, method=None): + if args and not headers: + headers = self.__auth_header() + try: + display.vvv(url) + resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method) + data = json.load(resp) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['detail']) + return data + + @property + def api_server(self): + return self._api_server + + @property + def validate_certs(self): + return self._validate_certs + + def get_server_api_version(self): """ Fetches the Galaxy API current version to ensure the API server is up and reachable. """ - #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1) - # also should set baseurl using supported_versions which has path - return 'v1' - try: - data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs)) - return data.get("current_version", 'v1') - except Exception: - # TODO: report error - return None + url = '%s/api/' % self._api_server + data = json.load(open_url(url, validate_certs=self._validate_certs)) + return data['current_version'] + except Exception as e: + raise AnsibleError("The API server (%s) is not responding, please try again later." % url) + + def authenticate(self, github_token): + """ + Retrieve an authentication token + """ + url = '%s/tokens/' % self.baseurl + args = urllib.urlencode({"github_token": github_token}) + resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST") + data = json.load(resp) + return data + def create_import_task(self, github_user, github_repo, reference=None): + """ + Post an import request + """ + url = '%s/imports/' % self.baseurl + args = urllib.urlencode({ + "github_user": github_user, + "github_repo": github_repo, + "github_reference": reference if reference else "" + }) + data = self.__call_galaxy(url, args=args) + if data.get('results', None): + return data['results'] + return data + + def get_import_task(self, task_id=None, github_user=None, github_repo=None): + """ + Check the status of an import task. + """ + url = '%s/imports/' % self.baseurl + if not task_id is None: + url = "%s?id=%d" % (url,task_id) + elif not github_user is None and not github_repo is None: + url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo) + else: + raise AnsibleError("Expected task_id or github_user and github_repo") + + data = self.__call_galaxy(url) + return data['results'] + def lookup_role_by_name(self, role_name, notify=True): """ - Find a role by name + Find a role by name. """ role_name = urlquote(role_name) @@ -92,18 +164,12 @@ class GalaxyAPI(object): if notify: display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) except: - raise AnsibleError("- invalid role name (%s). 
Specify role as format: username.rolename" % role_name) + raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name) url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) - display.vvvv("- %s" % (url)) - try: - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) - if len(data["results"]) != 0: - return data["results"][0] - except: - # TODO: report on connection/availability errors - pass - + data = self.__call_galaxy(url) + if len(data["results"]) != 0: + return data["results"][0] return None def fetch_role_related(self, related, role_id): @@ -114,13 +180,12 @@ class GalaxyAPI(object): try: url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results = data['results'] done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results @@ -131,10 +196,9 @@ class GalaxyAPI(object): """ Fetch the list of items specified. """ - try: url = '%s/%s/?page_size' % (self.baseurl, what) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) if "results" in data: results = data['results'] else: @@ -144,41 +208,64 @@ class GalaxyAPI(object): done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) - def search_roles(self, search, platforms=None, tags=None): + def search_roles(self, search, **kwargs): - search_url = self.baseurl + '/roles/?page=1' + search_url = self.baseurl + '/search/roles/?' 
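To make the query assembly below concrete, this is the sort of URL that ends up being requested for a search on 'nginx' filtered by the web and proxy tags, assuming the default server and page size; the individual parameters are appended by the lines that follow:

    baseurl = 'https://galaxy.ansible.com/api/v1'
    search_url = baseurl + '/search/roles/?'
    search_url += '&autocomplete=' + 'nginx'                          # free-text term(s)
    search_url += '&tags_autocomplete=' + '+'.join(['web', 'proxy'])  # galaxy tags
    search_url += '&page_size=%s' % 1000
    # https://galaxy.ansible.com/api/v1/search/roles/?&autocomplete=nginx&tags_autocomplete=web+proxy&page_size=1000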
if search: - search_url += '&search=' + urlquote(search) + search_url += '&autocomplete=' + urlquote(search) - if tags is None: - tags = [] - elif isinstance(tags, basestring): + tags = kwargs.get('tags',None) + platforms = kwargs.get('platforms', None) + page_size = kwargs.get('page_size', None) + author = kwargs.get('author', None) + + if tags and isinstance(tags, basestring): tags = tags.split(',') - - for tag in tags: - search_url += '&chain__tags__name=' + urlquote(tag) - - if platforms is None: - platforms = [] - elif isinstance(platforms, basestring): + search_url += '&tags_autocomplete=' + '+'.join(tags) + + if platforms and isinstance(platforms, basestring): platforms = platforms.split(',') + search_url += '&platforms_autocomplete=' + '+'.join(platforms) - for plat in platforms: - search_url += '&chain__platforms__name=' + urlquote(plat) - - display.debug("Executing query: %s" % search_url) - try: - data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs)) - except HTTPError as e: - raise AnsibleError("Unsuccessful request to server: %s" % str(e)) + if page_size: + search_url += '&page_size=%s' % page_size + if author: + search_url += '&username_autocomplete=%s' % author + + data = self.__call_galaxy(search_url) + return data + + def add_secret(self, source, github_user, github_repo, secret): + url = "%s/notification_secrets/" % self.baseurl + args = urllib.urlencode({ + "source": source, + "github_user": github_user, + "github_repo": github_repo, + "secret": secret + }) + data = self.__call_galaxy(url, args=args) + return data + + def list_secrets(self): + url = "%s/notification_secrets" % self.baseurl + data = self.__call_galaxy(url, headers=self.__auth_header()) + return data + + def remove_secret(self, secret_id): + url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') + return data + + def delete_role(self, github_user, github_repo): + url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') return data diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2 index c618adb3d4b..1054c64bdfa 100644 --- a/lib/ansible/galaxy/data/metadata_template.j2 +++ b/lib/ansible/galaxy/data/metadata_template.j2 @@ -2,9 +2,11 @@ galaxy_info: author: {{ author }} description: {{description}} company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the # next line and provide a value # issue_tracker_url: {{ issue_tracker_url }} + # Some suggested licenses: # - BSD (default) # - MIT @@ -13,7 +15,17 @@ galaxy_info: # - Apache # - CC-BY license: {{ license }} + min_ansible_version: {{ min_ansible_version }} + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. During import Galaxy will access files on + # this branch. If travis integration is cofigured, only notification for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + # # Below are all platforms currently available. Just uncomment # the ones that apply to your role. 
If you don't see your @@ -28,6 +40,7 @@ galaxy_info: # - {{ version }} {%- endfor %} {%- endfor %} + galaxy_tags: [] # List tags for your role here, one per line. A tag is # a keyword that describes and categorizes the role. @@ -36,6 +49,7 @@ galaxy_info: # # NOTE: A tag is limited to a single word comprised of # alphanumeric characters. Maximum 20 tags per role. + dependencies: [] # List your role dependencies here, one per line. # Be sure to remove the '[]' above if you add dependencies diff --git a/lib/ansible/galaxy/data/test_playbook.j2 b/lib/ansible/galaxy/data/test_playbook.j2 new file mode 100644 index 00000000000..45824f60519 --- /dev/null +++ b/lib/ansible/galaxy/data/test_playbook.j2 @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - {{ role_name }} \ No newline at end of file diff --git a/lib/ansible/galaxy/data/travis.j2 b/lib/ansible/galaxy/data/travis.j2 new file mode 100644 index 00000000000..36bbf6208cf --- /dev/null +++ b/lib/ansible/galaxy/data/travis.j2 @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/lib/ansible/galaxy/login.py b/lib/ansible/galaxy/login.py new file mode 100644 index 00000000000..3edaed7bc70 --- /dev/null +++ b/lib/ansible/galaxy/login.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht <chouse@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+# +######################################################################## + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import getpass +import json +import urllib + +from urllib2 import quote as urlquote, HTTPError +from urlparse import urlparse + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils.urls import open_url +from ansible.utils.color import stringc + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + +class GalaxyLogin(object): + ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations ''' + + GITHUB_AUTH = 'https://api.github.com/authorizations' + + def __init__(self, galaxy, github_token=None): + self.galaxy = galaxy + self.github_username = None + self.github_password = None + + if github_token == None: + self.get_credentials() + + def get_credentials(self): + display.display(u'\n\n' + "We need your " + stringc("Github login",'bright cyan') + + " to identify you.", screen_only=True) + display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') + + ", only to " + stringc("api.github.com.","yellow"), screen_only=True) + display.display("The password will not be displayed." + u'\n\n', screen_only=True) + display.display("Use " + stringc("--github-token",'yellow') + + " if you do not want to enter your password." + u'\n\n', screen_only=True) + + try: + self.github_username = raw_input("Github Username: ") + except: + pass + + try: + self.github_password = getpass.getpass("Password for %s: " % self.github_username) + except: + pass + + if not self.github_username or not self.github_password: + raise AnsibleError("Invalid Github credentials. Username and password are required.") + + def remove_github_token(self): + ''' + If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot + retrieve the token after creation, so we are forced to create a new one. 
+ ''' + try: + tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True,)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + for token in tokens: + if token['note'] == 'ansible-galaxy login': + display.vvvvv('removing token: %s' % token['token_last_eight']) + try: + open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username, + url_password=self.github_password, method='DELETE', force_basic_auth=True,) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + def create_github_token(self): + ''' + Create a personal authorization token with a note of 'ansible-galaxy login' + ''' + self.remove_github_token() + args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"}) + try: + data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True, data=args)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + return data['token'] diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index dc9da5d79ce..36b1e0fbbba 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -46,7 +46,7 @@ class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): @@ -198,10 +198,10 @@ class GalaxyRole(object): role_data = self.src tmp_file = self.fetch(role_data) else: - api = GalaxyAPI(self.galaxy, self.options.api_server) + api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if not role_data: - raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server)) + raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server)) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: @@ -213,8 +213,10 @@ class GalaxyRole(object): loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) + elif role_data.get('github_branch', None): + self.version = role_data['github_branch'] else: - self.version = 'master' + self.version = 'master' elif self.version != 'master': if role_versions and self.version not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions)) diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py new file mode 100644 index 00000000000..02ca8330697 --- /dev/null +++ b/lib/ansible/galaxy/token.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht <chouse@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +######################################################################## +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import yaml +from stat import * + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + + +class GalaxyToken(object): + ''' Class to storing and retrieving token in ~/.ansible_galaxy ''' + + def __init__(self): + self.file = os.path.expanduser("~") + '/.ansible_galaxy' + self.config = yaml.safe_load(self.__open_config_for_read()) + if not self.config: + self.config = {} + + def __open_config_for_read(self): + if os.path.isfile(self.file): + display.vvv('Opened %s' % self.file) + return open(self.file, 'r') + # config.yml not found, create and chomd u+rw + f = open(self.file,'w') + f.close() + os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw + display.vvv('Created %s' % self.file) + return open(self.file, 'r') + + def set(self, token): + self.config['token'] = token + self.save() + + def get(self): + return self.config.get('token', None) + + def save(self): + with open(self.file,'w') as f: + yaml.safe_dump(self.config,f,default_flow_style=False) + \ No newline at end of file From 04fc3f118f5989df4c2ba462d86a75d0b72fc50a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 0065/1113] Code smell test for specifying both required and default in FieldAttributes --- .travis.yml | 1 + test/code-smell/required-and-default-attributes.sh | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100755 test/code-smell/required-and-default-attributes.sh diff --git a/.travis.yml b/.travis.yml index 1ff0ca118d4..603132f722c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,7 @@ script: - ./test/code-smell/replace-urlopen.sh . - ./test/code-smell/use-compat-six.sh lib - ./test/code-smell/boilerplate.sh +- ./test/code-smell/required-and-default-attributes.sh - if test x"$TOXENV" != x'py24' ; then tox ; fi - if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi #- make -C docsite all diff --git a/test/code-smell/required-and-default-attributes.sh b/test/code-smell/required-and-default-attributes.sh new file mode 100755 index 00000000000..9822a155973 --- /dev/null +++ b/test/code-smell/required-and-default-attributes.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +BASEDIR=${1-"lib/ansible"} +cd "$BASEDIR" +grep -r FieldAttribute . |grep 'default' | grep 'required' +if test $? -eq 0 ; then + exit 1 +fi +exit 0 + From c64298de02a9998d6c5774ccb1f92a9aec435d74 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 08:22:58 -0800 Subject: [PATCH 0066/1113] Revert "avoid persistent containers in attribute defaults" This reverts commit 87969868d42cd8aba1c65c8207d059d73407373b. 
found better way to do it --- lib/ansible/playbook/attribute.py | 11 ----------- lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 18 insertions(+), 29 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index ce7ed6d8fe7..703d9dbca1e 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,17 +32,6 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate - # This is here to avoid `default=<container>` unwanted persistence across object instances - # We cannot rely on None as some fields use it to skip the code - # that would detect an empty container as a user error - if self.default == '_ansible_container': - if self.isa == 'list': - self.default = [] - elif self.isa == 'dict': - self.default = {} - elif self.isa == 'set': - self.default = set() - def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 66009b028af..f2d9c82833a 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ from ansible.playbook.taggable import Taggable class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default='_ansible_container') - _rescue = FieldAttribute(isa='list', default='_ansible_container') - _always = FieldAttribute(isa='list', default='_ansible_container') + _block = FieldAttribute(isa='list', default=[]) + _rescue = FieldAttribute(isa='list', default=[]) + _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index a5b3ca725f8..fc178e2fa1d 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. 
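The hazard this revert and the deepcopy change a few commits further down are both aimed at is the familiar shared-mutable-default problem: a container created once as a FieldAttribute default can end up being the very same object on every instance that falls back to it, so one object's mutations quietly become another's defaults. A toy illustration of the failure mode, using simplified stand-ins rather than the real FieldAttribute/Base machinery:

    class FieldAttribute(object):
        def __init__(self, isa=None, default=None):
            self.isa = isa
            self.default = default

    class Play(object):
        _tags = FieldAttribute(isa='list', default=[])   # one list object, created once

        def __init__(self):
            # naive fallback hands every Play the very same list
            self.tags = Play._tags.default

    p1, p2 = Play(), Play()
    p1.tags.append('web')
    print(p2.tags)   # ['web'] - p1's mutation has leaked into p2

The remedies all amount to copying the container instead of sharing it, which is the direction the deepcopy change further down takes.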
''' - _when = FieldAttribute(isa='list', default='_ansible_container') + _when = FieldAttribute(isa='list', default=[]) def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index e08c8c60016..ed61416e951 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) - _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default=[], priority=99) + _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) _vault_password = FieldAttribute(isa='string', always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) + _roles = FieldAttribute(isa='list', default=[], priority=90) # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default='_ansible_container') - _pre_tasks = FieldAttribute(isa='list', default='_ansible_container') - _post_tasks = FieldAttribute(isa='list', default='_ansible_container') - _tasks = FieldAttribute(isa='list', default='_ansible_container') + _handlers = FieldAttribute(isa='list', default=[]) + _pre_tasks = FieldAttribute(isa='list', default=[]) + _post_tasks = FieldAttribute(isa='list', default=[]) + _tasks = FieldAttribute(isa='list', default=[]) # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index da291c3c834..81223500adf 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default='_ansible_container') - _skip_tags = FieldAttribute(isa='set', default='_ansible_container') + _only_tags = FieldAttribute(isa='set', default=set()) + _skip_tags = FieldAttribute(isa='set', default=set()) _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 52081c41539..d9af2ba5237 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', default='_ansible_container') + _vars = FieldAttribute(isa='dict', default=dict()) @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 4bb7d0ce02b..58b59145a1c 100644 --- 
a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default='_ansible_container') + _dependencies = FieldAttribute(isa='list', default=[]) _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 37e3261e80d..8f5cfa09344 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ from ansible.template import Templar class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 53a9a3c3931..17f1952e39c 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default='_ansible_container') + _args = FieldAttribute(isa='dict', default=dict()) _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 2820b4c243d50416f661c4ea9408bba1918244bb Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 0067/1113] removed default from hosts to make it requried prevents writing a play w/o a hosts entry which would default to all/empty --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e951..bc033148646 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,7 +64,7 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes From 2bfb13bfb39bf31c5c1bc40f376907fc50ca69ef Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 08:28:54 -0800 Subject: [PATCH 0068/1113] removed unused 'pattern' from ansible.cfg also moved the config param to a 'deprecated' list in constants.py added TODO for producing a deprecation warning for such vars --- examples/ansible.cfg | 1 - lib/ansible/constants.py | 8 ++++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 87c089f45ae..ec3ddf20641 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -14,7 +14,6 @@ #inventory = /etc/ansible/hosts #library = /usr/share/my_modules/ #remote_tmp = $HOME/.ansible/tmp -#pattern = * #forks = 5 #poll_interval = 15 #sudo_user = root diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index ae10c5e9a42..7f74358dd5d 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -120,16 +120,20 @@ DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'd # sections 
in config file DEFAULTS='defaults' +# FIXME: add deprecation warning when these get set +#### DEPRECATED VARS #### +# use more sanely named 'inventory' DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True) +# this is not used since 0.5 but people might still have in config +DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None) -# generally configurable things +#### GENERALLY CONFIGURABLE THINGS #### DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True) DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') -DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8')) From ae2447df9136353453c9ed48d44b2c7fa70231b0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 08:38:53 -0800 Subject: [PATCH 0069/1113] attribute defaults that are containers are a copy This is simpler way to prevent persistent containers across instances of classes that use field attributes --- lib/ansible/playbook/attribute.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1e..0befb9d80df 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from copy import deepcopy class Attribute: @@ -32,6 +33,11 @@ class Attribute: self.priority = priority self.always_post_validate = always_post_validate + if default is not None and self.isa in ('list', 'dict', 'set'): + self.default = deepcopy(default) + else: + self.default = default + def __eq__(self, other): return other.priority == self.priority From 0211da2fe9a7b3cefa79d72aab599546bf923e1b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 08:44:09 -0800 Subject: [PATCH 0070/1113] Clarify language of delegate_facts documentation --- docsite/rst/playbooks_delegation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4e2e8c372ac..c715adea361 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -137,8 +137,8 @@ Delegated facts .. versionadded:: 2.0 -Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). 
-The new directive `delegate_facts` if set to `True` will assing the task's gathered facts to the delegated host instead of the current one.:: +By default, any fact gathered by a delegated task are assigned to the `inventory_hostname` (the current host) instead of the host which actually produced the facts (the delegated to host). +In 2.0, the directive `delegate_facts` may be set to `True` to assign the task's gathered facts to the delegated host instead of the current one.:: - hosts: app_servers @@ -149,8 +149,8 @@ The new directive `delegate_facts` if set to `True` will assing the task's gathe delegate_facts: True with_items: "{{groups['dbservers'}}" -The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, -that way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers. +This way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. .. _run_once: From 7936a4687e9be3752bdbee006d956ed4f2687160 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 10:01:21 -0800 Subject: [PATCH 0071/1113] adhoc avoids callbacks by default as it did before Previous emptying of whitelist only affected callbacks that were constructed for need whitelist. This now works for all callbacks. --- lib/ansible/cli/adhoc.py | 4 +--- lib/ansible/executor/task_queue_manager.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 912b07a5c72..f6dcb37a8ab 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,6 @@ class AdHocCLI(CLI): else: cb = 'minimal' - if not C.DEFAULT_LOAD_CALLBACK_PLUGINS: - C.DEFAULT_CALLBACK_WHITELIST = [] - if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree @@ -180,6 +177,7 @@ class AdHocCLI(CLI): options=self.options, passwords=passwords, stdout_callback=cb, + run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index d665000046c..70cefee510b 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. 
''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): self._inventory = inventory self._variable_manager = variable_manager @@ -65,6 +65,7 @@ class TaskQueueManager: self._stats = AggregateStats() self.passwords = passwords self._stdout_callback = stdout_callback + self._run_additional_callbacks = run_additional_callbacks self._callbacks_loaded = False self._callback_plugins = [] @@ -159,7 +160,7 @@ class TaskQueueManager: if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True - elif callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST): + elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue self._callback_plugins.append(callback_plugin()) From 04d74fd6804b5a851cc8762cecf07b100e4dcc6f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 10:13:50 -0800 Subject: [PATCH 0072/1113] reenabled --tree for ansible adhoc command previous fix to avoid callbacks now conflicted with tree optoin which is implemented as a callback in 2.0 --- lib/ansible/cli/adhoc.py | 3 +++ lib/ansible/executor/task_queue_manager.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index f6dcb37a8ab..3de0e55b7bb 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,11 @@ class AdHocCLI(CLI): else: cb = 'minimal' + run_tree=False if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree + run_tree=True # now create a task queue manager to execute the play self._tqm = None @@ -178,6 +180,7 @@ class AdHocCLI(CLI): passwords=passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, + run_tree=run_tree, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 70cefee510b..74111382935 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. 
''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False): self._inventory = inventory self._variable_manager = variable_manager @@ -66,6 +66,7 @@ class TaskQueueManager: self.passwords = passwords self._stdout_callback = stdout_callback self._run_additional_callbacks = run_additional_callbacks + self._run_tree = run_tree self._callbacks_loaded = False self._callback_plugins = [] @@ -160,6 +161,8 @@ class TaskQueueManager: if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True + elif callback_name == 'tree' and self._run_tree: + pass elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue From 14e19c239d610619498f06978e2841764a262e15 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 9 Dec 2015 14:51:43 -0500 Subject: [PATCH 0073/1113] Make on_file_diff callback item-aware --- lib/ansible/plugins/callback/__init__.py | 6 +++++- lib/ansible/plugins/callback/default.py | 9 ++++++++- lib/ansible/plugins/callback/skippy.py | 9 ++++++++- lib/ansible/plugins/strategy/__init__.py | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 03eb58d99db..b8a48943f28 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,6 +59,10 @@ class CallbackBase: version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) + def _copy_result(self, result): + ''' helper for callbacks, so they don't all have to include deepcopy ''' + return deepcopy(result) + def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")) @@ -126,7 +130,7 @@ class CallbackBase: def _process_items(self, result): for res in result._result['results']: - newres = deepcopy(result) + newres = self._copy_result(result) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 3175bf3e53c..1f37f4b975e 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -134,7 +134,14 @@ class CallbackModule(CallbackBase): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 15b7d3387c2..495943417fd 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -123,7 
+123,14 @@ class CallbackModule(CallbackBase): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 732a9293d28..15636b580d1 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -221,7 +221,7 @@ class StrategyBase: self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) - if self._diff and 'diff' in task_result._result: + if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._pending_results -= 1 From 61dc4a7e67bcb7c968e273ee39618d1f76f7ab9e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 12:10:21 -0800 Subject: [PATCH 0074/1113] Update module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 191347676ee..0b5555b62cd 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 +Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a10bdd6be94..cbed6420094 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 +Subproject commit cbed642009497ddaf19b5f578ab6c78da1356eda From 64864829c4a858e296b049075675e960de678690 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Dec 2015 12:37:56 -0800 Subject: [PATCH 0075/1113] changed deprecation to removal warning --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index d7d0f03fb1f..3c1331e7065 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -388,7 +388,7 @@ class Inventory(object): end = -1 subscript = (int(start), int(end)) if sep == '-': - display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True) + display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed") return (pattern, subscript) From 07bf4d9ac4899eb2e0e8246530ff2ca3ee75f3ef Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Wed, 9 Dec 2015 15:48:53 -0500 Subject: [PATCH 0076/1113] added winrm CP notes to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d31ef4ebb2..2bf11e6c5bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,8 @@ newline being stripped you can change your playbook like this: ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases +* WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. 
This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. +* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. ####Deprecated Modules (new ones in parens): From c0d79cf7e10da157ae1b28283ab7b564baee7b51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 13:07:00 -0800 Subject: [PATCH 0077/1113] Remove the funcd connection plugin --- lib/ansible/plugins/connection/funcd.py | 99 ------------------------- 1 file changed, 99 deletions(-) delete mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py deleted file mode 100644 index 4c9e09be65c..00000000000 --- a/lib/ansible/plugins/connection/funcd.py +++ /dev/null @@ -1,99 +0,0 @@ -# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> -# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> -# (c) 2013, Michael Scherer <misc@zarb.org> -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -# --- -# The func transport permit to use ansible over func. For people who have already setup -# func and that wish to play with ansible, this permit to move gradually to ansible -# without having to redo completely the setup of the network. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAVE_FUNC=False -try: - import func.overlord.client as fc - HAVE_FUNC=True -except ImportError: - pass - -import os -from ansible.callbacks import vvv -from ansible import errors -import tempfile -import shutil - - -class Connection(object): - ''' Func-based connections ''' - - def __init__(self, runner, host, port, *args, **kwargs): - self.runner = runner - self.host = host - self.has_pipelining = False - # port is unused, this go on func - self.port = port - - def connect(self, port=None): - if not HAVE_FUNC: - raise errors.AnsibleError("func is not installed") - - self.client = fc.Client(self.host) - return self - - def exec_command(self, cmd, become_user=None, sudoable=False, - executable='/bin/sh', in_data=None): - ''' run a command on the remote minion ''' - - if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - - # totally ignores privlege escalation - vvv("EXEC %s" % (cmd), host=self.host) - p = self.client.command.run(cmd)[self.host] - return (p[0], p[1], p[2]) - - def _normalize_path(self, path, prefix): - if not path.startswith(os.path.sep): - path = os.path.join(os.path.sep, path) - normpath = os.path.normpath(path) - return os.path.join(prefix, normpath[1:]) - - def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' - - out_path = self._normalize_path(out_path, '/') - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) - self.client.local.copyfile.send(in_path, out_path) - - def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' - - in_path = self._normalize_path(in_path, '/') - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - # need to use a tmp dir due to difference of semantic for getfile - # ( who take a # directory as destination) and fetch_file, who - # take a file directly - tmpdir = tempfile.mkdtemp(prefix="func_ansible") - self.client.local.getfile.get(in_path, tmpdir) - shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), - out_path) - shutil.rmtree(tmpdir) - - def close(self): - ''' terminate the connection; nothing to do here ''' - pass From 18ac12aee60b0033d4b8af4a78ddbd55335c2991 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Wed, 9 Dec 2015 22:08:30 +0100 Subject: [PATCH 0078/1113] Do not fail when variable is not correct in debug action. See https://github.com/ansible/ansible/issues/13484 for more information. 
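The flag being flipped in this change controls whether templating an unresolvable expression aborts the task or degrades quietly. Ansible's Templar is built on top of Jinja2, so the difference can be illustrated with plain Jinja2 (an analogy for the behaviour, not the actual Templar code path):

    from jinja2 import Environment, StrictUndefined
    from jinja2.exceptions import UndefinedError

    template = '{{ not_a_real_var }}'

    # Default Undefined: the unknown variable renders as an empty string and
    # execution continues rather than raising, which is the property the
    # debug action now opts into.
    print(Environment().from_string(template).render())

    # StrictUndefined: the same template raises instead of rendering.
    try:
        Environment(undefined=StrictUndefined).from_string(template).render()
    except UndefinedError as e:
        print('strict templating failed: %s' % e)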
--- lib/ansible/plugins/action/debug.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 1d8e28c7a4a..a0ffb714044 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -40,7 +40,7 @@ class ActionModule(ActionBase): result['msg'] = self._task.args['msg'] # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True) + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results From a7cd41b482dc6bf1bf1073e451aa1b38526dde08 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Wed, 9 Dec 2015 16:29:39 -0500 Subject: [PATCH 0079/1113] Windows doc updates --- docsite/rst/intro_windows.rst | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index e5cbb94fafd..1adcc35010f 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -31,7 +31,7 @@ On a Linux control machine:: Active Directory Support ++++++++++++++++++++++++ -If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module and the MIT krb5 libraries it depends on. +If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module on the Ansible control host (and the MIT krb5 libraries it depends on). The Ansible control host also requires a properly configured computer account in Active Directory. Installing python-kerberos dependencies --------------------------------------- @@ -131,7 +131,9 @@ To test this, ping the windows host you want to control by name then use the ip If you get different hostnames back than the name you originally pinged, speak to your active directory administrator and get them to check that DNS Scavenging is enabled and that DNS and DHCP are updating each other. -Check your ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. +Ensure that the Ansible controller has a properly configured computer account in the domain. + +Check your Ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. Check you are using the real fully qualified domain name for the domain. Sometimes domains are commonly known to users by aliases. 
To check this run: @@ -165,6 +167,8 @@ In group_vars/windows.yml, define the following inventory variables:: ansible_password: SecretPasswordGoesHere ansible_port: 5986 ansible_connection: winrm + # The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates: + ansible_winrm_server_cert_validation: ignore Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet <http://blogs.msdn.com/b/powershell/archive/2015/06/03/looking-forward-microsoft-support-for-secure-shell-ssh.aspx>`). @@ -189,6 +193,7 @@ Since 2.0, the following custom inventory variables are also supported for addit * ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint. Ansible uses ``/wsman`` by default. * ``ansible_winrm_realm``: Specify the realm to use for Kerberos authentication. If the username contains ``@``, Ansible will use the part of the username after ``@`` by default. * ``ansible_winrm_transport``: Specify one or more transports as a comma-separated list. By default, Ansible will use ``kerberos,plaintext`` if the ``kerberos`` module is installed and a realm is defined, otherwise ``plaintext``. +* ``ansible_winrm_server_cert_validation``: Specify the server certificate validation mode (``ignore`` or ``validate``). Ansible defaults to ``validate`` on Python 2.7.9 and higher, which will result in certificate validation errors against the Windows self-signed certificates. Unless verifiable certificates have been configured on the WinRM listeners, this should be set to ``ignore`` * ``ansible_winrm_*``: Any additional keyword arguments supported by ``winrm.Protocol`` may be provided. .. _windows_system_prep: @@ -221,7 +226,7 @@ Getting to PowerShell 3.0 or higher PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows. -Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 <https://github.com/cchurch/ansible/blob/devel/examples/scripts/upgrade_to_ps3.ps1>`_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. +Looking at an Ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 <https://github.com/cchurch/ansible/blob/devel/examples/scripts/upgrade_to_ps3.ps1>`_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. .. _what_windows_modules_are_available: @@ -248,10 +253,10 @@ Note there are a few other Ansible modules that don't start with "win" that also Developers: Supported modules and how it works `````````````````````````````````````````````` -Developing ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix. -What if you want to write Windows modules for ansible though? +Developing Ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix. +What if you want to write Windows modules for Ansible though? -For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. 
+For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named "library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. @@ -351,7 +356,7 @@ form of new modules, tweaks to existing modules, documentation, or something els :doc:`developing_modules` How to write modules :doc:`playbooks` - Learning ansible's configuration management language + Learning Ansible's configuration management language `List of Windows Modules <http://docs.ansible.com/list_of_windows_modules.html>`_ Windows specific module list, all implemented in PowerShell `Mailing List <http://groups.google.com/group/ansible-project>`_ From 62cbc03af6410df2b9c61a5056f71a51dd2570ec Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 13:29:53 -0800 Subject: [PATCH 0080/1113] Revert "Remove the funcd connection plugin" This reverts commit c0d79cf7e10da157ae1b28283ab7b564baee7b51. We may still port the funcd connection plugin, just not in time for 2.0.0 --- lib/ansible/plugins/connection/funcd.py | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py new file mode 100644 index 00000000000..4c9e09be65c --- /dev/null +++ b/lib/ansible/plugins/connection/funcd.py @@ -0,0 +1,99 @@ +# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> +# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com> +# (c) 2013, Michael Scherer <misc@zarb.org> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# --- +# The func transport permit to use ansible over func. For people who have already setup +# func and that wish to play with ansible, this permit to move gradually to ansible +# without having to redo completely the setup of the network. 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +HAVE_FUNC=False +try: + import func.overlord.client as fc + HAVE_FUNC=True +except ImportError: + pass + +import os +from ansible.callbacks import vvv +from ansible import errors +import tempfile +import shutil + + +class Connection(object): + ''' Func-based connections ''' + + def __init__(self, runner, host, port, *args, **kwargs): + self.runner = runner + self.host = host + self.has_pipelining = False + # port is unused, this go on func + self.port = port + + def connect(self, port=None): + if not HAVE_FUNC: + raise errors.AnsibleError("func is not installed") + + self.client = fc.Client(self.host) + return self + + def exec_command(self, cmd, become_user=None, sudoable=False, + executable='/bin/sh', in_data=None): + ''' run a command on the remote minion ''' + + if in_data: + raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + + # totally ignores privlege escalation + vvv("EXEC %s" % (cmd), host=self.host) + p = self.client.command.run(cmd)[self.host] + return (p[0], p[1], p[2]) + + def _normalize_path(self, path, prefix): + if not path.startswith(os.path.sep): + path = os.path.join(os.path.sep, path) + normpath = os.path.normpath(path) + return os.path.join(prefix, normpath[1:]) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + out_path = self._normalize_path(out_path, '/') + vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self.client.local.copyfile.send(in_path, out_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from remote to local ''' + + in_path = self._normalize_path(in_path, '/') + vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + # need to use a tmp dir due to difference of semantic for getfile + # ( who take a # directory as destination) and fetch_file, who + # take a file directly + tmpdir = tempfile.mkdtemp(prefix="func_ansible") + self.client.local.getfile.get(in_path, tmpdir) + shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), + out_path) + shutil.rmtree(tmpdir) + + def close(self): + ''' terminate the connection; nothing to do here ''' + pass From a19e083d33ae5ae59be358c9468a4318aca3174f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 13:52:01 -0800 Subject: [PATCH 0081/1113] Note that handlers inside of includes are not possible at the moment --- docsite/rst/playbooks_intro.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index e0f1aec5c10..28c809f0132 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -386,6 +386,7 @@ won't need them for much else. * Handler names live in a global namespace. * If two handler tasks have the same name, only one will run. 
`* <https://github.com/ansible/ansible/issues/4943>`_ + * You cannot notify a handler that is defined inside of an include Roles are described later on, but it's worthwhile to point out that: From a61387846d3e210181683a60df14c8e7cbf46893 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Dec 2015 10:22:07 -0800 Subject: [PATCH 0082/1113] draft release documentation --- docsite/rst/developing_releases.rst | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docsite/rst/developing_releases.rst diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst new file mode 100644 index 00000000000..1eeb2421210 --- /dev/null +++ b/docsite/rst/developing_releases.rst @@ -0,0 +1,48 @@ +Releases +======== + +.. contents:: Topics + :local: + +.. schedule:: + +Release Schedule +```````````````` +Ansible is on a 'flexible' 4 month release schedule, sometimes this can be extended if there is a major change that requires a longer cycle (i.e. 2.0 core rewrite). +Currently modules get released at the same time as the main Ansible repo, even though they are separated into ansible-modules-core and ansible-modules-extras. + +The major features and bugs fixed in a release should be reflected in the CHANGELOG.md, minor ones will be in the commit history (FIXME: add git exmaple to list). +When a fix/feature gets added to the `devel` branch it will be part of the next release, some bugfixes can be backported to previous releases and might be part of a minor point release if it is deemed necessary. + +Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. + +.. methods:: + +Release methods +```````````````` + +Ansible normally goes through a 'release candidate', issuing an RC1 for a release, if no major bugs are discovered in it after 5 business days we'll get a final release. +Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. + + +.. freezing:: + +Release feature freeze +`````````````````````` + +During the release candidate process, the focus will be on bugfixes that affect the RC, new features will be delayed while we try to produce a final version. Some bugfixes that are minor or don't affect the RC will also be postponed until after the release is finalized. + +.. 
seealso:: + + :doc:`developing_api` + Python API to Playbooks and Ad Hoc Task Execution + :doc:`developing_modules` + How to develop modules + :doc:`developing_plugins` + How to develop plugins + `Ansible Tower <http://ansible.com/ansible-tower>`_ + REST API endpoint and GUI for Ansible, syncs with dynamic inventory + `Development Mailing List <http://groups.google.com/group/ansible-devel>`_ + Mailing list for development topics + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel From 2b363434514aa94aad145d2a6eacf4c1013490d8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 9 Dec 2015 17:57:52 -0500 Subject: [PATCH 0083/1113] Missed one place we were appending the incorrectly escaped item to raw params --- lib/ansible/parsing/splitter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index f24d8ecf9de..feb0cd2b34b 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -86,7 +86,7 @@ def parse_kv(args, check_raw=False): # FIXME: make the retrieval of this list of shell/command # options a function, so the list is centralized if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'): - raw_params.append(x) + raw_params.append(orig_x) else: options[k.strip()] = unquote(v.strip()) else: From 30e729557f0056ec561288046e2aa933efe899b3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Dec 2015 16:43:24 -0800 Subject: [PATCH 0084/1113] Add first draft of porting guide for 2.0 --- docsite/rst/porting_guide_2.0.rst | 160 ++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 docsite/rst/porting_guide_2.0.rst diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst new file mode 100644 index 00000000000..9c26a4b1611 --- /dev/null +++ b/docsite/rst/porting_guide_2.0.rst @@ -0,0 +1,160 @@ +Porting Guide +============= + + +Playbook +-------- + +* backslash escapes When specifying parameters in jinja2 expressions in YAML + dicts, backslashes sometimes needed to be escaped twice. This has been fixed + in 2.0.x so that escaping once works. The following example shows how + playbooks must be modified:: + + # Syntax in 1.9.x + - debug: + msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}" + # Syntax in 2.0.x + - debug: + msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + + # Output: + "msg": "test1 1\\3" + +To make an escaped string that will work on all versions you have two options:: + +- debug: msg="{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + +uses key=value escaping which has not changed. The other option is to check for the ansible version:: + +"{{ (ansible_version|version_compare('ge', '2.0'))|ternary( 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') , 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') ) }}" + +* trailing newline When a string with a trailing newline was specified in the + playbook via yaml dict format, the trailing newline was stripped. When + specified in key=value format, the trailing newlines were kept. In v2, both + methods of specifying the string will keep the trailing newlines. 
If you + relied on the trailing newline being stripped, you can change your playbook + using the following as an example:: + + # Syntax in 1.9.x + vars: + message: > + Testing + some things + tasks: + - debug: + msg: "{{ message }}" + + # Syntax in 2.0.x + vars: + old_message: > + Testing + some things + message: "{{ old_messsage[:-1] }}" + - debug: + msg: "{{ message }}" + # Output + "msg": "Testing some things" + +* porting task includes + * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. + * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 + * variable precedence +* templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. + If you need the old behavior, quote the value to pass it around as a string. + Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. +* Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. +* dnf module has been rewritten. Some minor changes in behavior may be observed. +* win_updates has been rewritten and works as expected now. + +Deprecated +---------- + +While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). + +* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. +* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Using variables for task parameters is unsafe and will be removed in a future version. For example:: + + - hosts: localhost + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" + +* Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. +* Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. +* Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. +* The “short form” for vars_prompt is no longer supported. +For example:: + +vars_prompt: + variable_name: "Prompt string" + +* Specifying variables at the top level of a task include statement is no longer supported. For example:: + + - include: foo.yml + a: 1 + +Should now be:: + +- include: foo.yml + args: + a: 1 + +* Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. +* Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. +* Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. +For example:: + + - include: foo.yml tags=a,b,c + +Should be:: + + - include: foo.yml + tags: [a, b, c] + +* The first_available_file option on tasks has been deprecated. 
Users should use the with_first_found option or lookup (‘first_found’, …) plugin. + + +Porting plugins +=============== + +In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized. + +.. note:: + +Lookup plugins +-------------- +* lookup plugins ; import version + + +Connection plugins +------------------ + +* connection plugins + +Action plugins +-------------- + +* action plugins + +Callback plugins +---------------- + +* callback plugins + +Connection plugins +------------------ + +* connection plugins + + +Porting custom scripts +====================== + +Custom scripts that used the ``ansible.runner.Runner`` API in 1.x have to be ported in 2.x. Please refer to: +https://github.com/ansible/ansible/blob/devel/docsite/rst/developing_api.rst From fe72fff57da967ff0e53c8026bcd94d67cdb59db Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Thu, 10 Dec 2015 01:58:17 +0100 Subject: [PATCH 0085/1113] Fix the markdown used for the Windows module section --- docsite/rst/developing_modules.rst | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index bdee4aa83dc..fde4b5704b6 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -538,24 +538,34 @@ Windows modules checklist #!powershell -then:: + then:: + <GPL header> -then:: + + then:: + # WANT_JSON # POWERSHELL_COMMON -then, to parse all arguments into a variable modules generally use:: + then, to parse all arguments into a variable modules generally use:: + $params = Parse-Args $args * Arguments: * Try and use state present and state absent like other modules * You need to check that all your mandatory args are present. You can do this using the builtin Get-AnsibleParam function. * Required arguments:: + $package = Get-AnsibleParam -obj $params -name name -failifempty $true + * Required arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true + * Optional arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent" + * the If "FailIfEmpty" is true, the resultobj parameter is used to specify the object returned to fail-json. You can also override the default message using $emptyattributefailmessage (for missing required attributes) and $ValidateSetErrorMessage (for attribute validation errors) * Look at existing modules for more examples of argument checking. @@ -586,7 +596,7 @@ Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i _old_cloud.py, This will keep the module available but hide it from the primary docs and listing. You can also rename modules and keep an alias to the old name by using a symlink that starts with _. 
-This example allows the stat module to be called with fileinfo, making the following examples equivalent +This example allows the stat module to be called with fileinfo, making the following examples equivalent:: EXAMPLES = ''' ln -s stat.py _fileinfo.py From c20c1a6d490933fa2ec8961508735422f3a6adeb Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Thu, 10 Dec 2015 11:16:21 +0100 Subject: [PATCH 0086/1113] add depth option to ansible-pull Allows shallow checkouts in ansible-pull by adding `--depth 1` (or higher number) --- lib/ansible/cli/pull.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 593d601e8d4..67e89259303 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,6 +80,8 @@ class PullCLI(CLI): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('--depth', dest='depth', default=None, + help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -154,6 +156,10 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' + + if self.options.depth: + repo_opts += ' depth=%s' % self.options.depth + path = module_loader.find_plugin(self.options.module_name) if path is None: From 6680cc7052dd4ef5bb166008a18a57e0f156df95 Mon Sep 17 00:00:00 2001 From: Charles Paul <cpaul@ansible.com> Date: Thu, 10 Dec 2015 08:04:06 -0500 Subject: [PATCH 0087/1113] allow custom callbacks with adhoc cli for scripting missing import of CallbackBase --- lib/ansible/cli/__init__.py | 3 ++- lib/ansible/cli/adhoc.py | 4 +++- lib/ansible/executor/task_queue_manager.py | 11 +++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da1aabcc698..a934a3a8ee5 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -66,7 +66,7 @@ class CLI(object): LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) # -S (chop long lines) -X (disable termcap init and de-init) - def __init__(self, args): + def __init__(self, args, callback=None): """ Base init method for all command line programs """ @@ -75,6 +75,7 @@ class CLI(object): self.options = None self.parser = None self.action = None + self.callback = callback def set_action(self): """ diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 3de0e55b7bb..250241a848f 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -158,7 +158,9 @@ class AdHocCLI(CLI): play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) - if self.options.one_line: + if self.callback: + cb = self.callback + elif self.options.one_line: cb = 'oneline' else: cb = 'minimal' diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 74111382935..e2b29a5282c 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -34,6 +34,7 @@ from ansible.playbook.play_context import PlayContext from ansible.plugins import callback_loader, 
strategy_loader, module_loader from ansible.template import Templar from ansible.vars.hostvars import HostVars +from ansible.plugins.callback import CallbackBase try: from __main__ import display @@ -146,8 +147,14 @@ class TaskQueueManager: if self._stdout_callback is None: self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK - if self._stdout_callback not in callback_loader: - raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + if isinstance(self._stdout_callback, CallbackBase): + self._callback_plugins.append(self._stdout_callback) + stdout_callback_loaded = True + elif isinstance(self._stdout_callback, basestring): + if self._stdout_callback not in callback_loader: + raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + else: + raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin") for callback_plugin in callback_loader.all(class_only=True): if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: From 72f0679f685dc6c79fe80736d2ca72f6778b8e5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Warcho=C5=82?= <lemniskata.bernoullego@gmail.com> Date: Thu, 10 Dec 2015 16:22:37 +0100 Subject: [PATCH 0088/1113] Explain how 'run_once' interacts with 'serial' --- docsite/rst/playbooks_delegation.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index c715adea361..fa808abb65b 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -182,13 +182,18 @@ This can be optionally paired with "delegate_to" to specify an individual host t delegate_to: web01.example.org When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory, -in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers". +in the group(s) of hosts targeted by the play - e.g. webservers[0] if the play targeted "hosts: webservers". -This approach is similar, although more concise and cleaner than applying a conditional to a task such as:: +This approach is similar to applying a conditional to a task such as:: - command: /opt/application/upgrade_db.py when: inventory_hostname == webservers[0] +.. note:: + When used together with "serial", tasks marked as "run_once" will be ran on one host in *each* serial batch. + If it's crucial that the task is run only once regardless of "serial" mode, use + :code:`inventory_hostname == my_group_name[0]` construct. + .. _local_playbooks: Local Playbooks From 1dda8158ff9aa5240e89711c7279c3d072e0e57e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 10 Dec 2015 07:28:58 -0800 Subject: [PATCH 0089/1113] become_pass needs to be bytes when it is passed to ssh. 
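As an illustrative aside (not part of the commit), the underlying Python 2 pitfall looks roughly like this; the password value is hypothetical:

    # -*- coding: utf-8 -*-
    # Writing a unicode password straight into the ssh process's byte-oriented
    # stdin makes Python 2 encode it implicitly as ASCII, which fails for
    # non-ASCII passwords.
    password = u'pa\u00dfword'   # hypothetical password containing a non-ASCII character

    try:
        (password + u'\n').encode('ascii')   # what the implicit conversion amounts to
    except UnicodeEncodeError as e:
        print('implicit ASCII conversion fails: %s' % e)

    # Converting explicitly first (the patch uses Ansible's to_bytes() helper
    # for this) yields bytes that can be written to the pipe safely:
    safe = u'{0}\n'.format(password).encode('utf-8')
    print(repr(safe))
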
Fixes #13240 --- lib/ansible/plugins/connection/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index debe36bd320..4251f8a63e8 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -463,7 +463,7 @@ class Connection(ConnectionBase): if states[state] == 'awaiting_prompt': if self._flags['become_prompt']: display.debug('Sending become_pass in response to prompt') - stdin.write(self._play_context.become_pass + '\n') + stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass ))) self._flags['become_prompt'] = False state += 1 elif self._flags['become_success']: From bd9582d0721db3c6e5e24b08c747e02a6391a0a7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 10 Dec 2015 08:10:45 -0800 Subject: [PATCH 0090/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0b5555b62cd..0d23b3df526 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a +Subproject commit 0d23b3df526875c8fc6edf94268f3aa850ec05f1 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index cbed6420094..51813e00333 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit cbed642009497ddaf19b5f578ab6c78da1356eda +Subproject commit 51813e003331c3341b07c5cda33346cada537a3b From c402325085c129ce289c73a808d8d6ac68df096d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Dec 2015 13:10:17 -0500 Subject: [PATCH 0091/1113] Fixing up docker integration tests a bit --- .../roles/test_docker/tasks/docker-tests.yml | 31 +++---------------- .../test_docker/tasks/registry-tests.yml | 11 ++----- 2 files changed, 8 insertions(+), 34 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 33ffe6c70ca..14e23f72dd5 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -8,7 +8,6 @@ image: busybox state: present pull: missing - docker_api_version: "1.14" - name: Run a small script in busybox docker: @@ -17,22 +16,12 @@ pull: always command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 2000" register: docker_output - name: check that the script ran @@ -49,22 +38,12 @@ TEST: hello command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"' detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip 
- shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 2000" register: docker_output - name: check that the script ran @@ -73,7 +52,7 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 57b4d252774..1ef330da5f6 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -19,11 +19,8 @@ - name: Push docker image into the private registry command: "docker push localhost:5000/mine" -- name: Remove containers - shell: "docker rm $(docker ps -aq)" - - name: Remove all images from the local docker - shell: "docker rmi -f $(docker images -q)" + shell: "docker rmi -f {{image_id.stdout_lines[0]}}" - name: Get number of images in docker command: "docker images" @@ -41,7 +38,6 @@ state: present pull: missing insecure_registry: True - docker_api_version: "1.14" - name: Run a small script in the new image docker: @@ -51,7 +47,6 @@ command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True insecure_registry: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" @@ -76,8 +71,9 @@ - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" +- shell: docker images -q - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" @@ -157,7 +153,6 @@ state: running command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" From a6a58d6947912328fd48e26ea1335bd9314f0135 Mon Sep 17 00:00:00 2001 From: Charles Paul <cpaul@ansible.com> Date: Thu, 10 Dec 2015 16:39:27 -0500 Subject: [PATCH 0092/1113] fix default host for non vcd service types --- lib/ansible/module_utils/vca.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index 56341ec5559..ef89d545569 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -108,7 +108,10 @@ class VcaAnsibleModule(AnsibleModule): def create_instance(self): service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE) - host = self.params.get('host', LOGIN_HOST.get('service_type')) + if service_type == 'vcd': + host = self.params['host'] + else: + host = LOGIN_HOST[service_type] username = self.params['username'] version = self.params.get('api_version') From 37c4e9aee34df2f421942e86c8afd1fef2bee5f6 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Fri, 11 Dec 2015 07:11:48 +0530 Subject: [PATCH 0093/1113] Clean up debug logging around _low_level_execute_command We were logging the command to be executed many times, which made debug logs very hard to read. Now we do it only once. 
Also makes the logged ssh command line cut-and-paste-able (the lack of which has confused a number of people by now; the problem being that we pass the command as a single argument to execve(), so it doesn't need an extra level of quoting as it does when you try to run it by hand). --- lib/ansible/plugins/action/__init__.py | 25 ++++++------------------- lib/ansible/plugins/connection/ssh.py | 2 +- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 497143224a7..154404e474c 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -202,9 +202,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): tmp_mode = 0o755 cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) - display.debug("executing _low_level_execute_command to create the tmp path") result = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done with creation of tmp path") # error handling on this seems a little aggressive? if result['rc'] != 0: @@ -249,9 +247,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._connection._shell.remove(tmp_path, recurse=True) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. - display.debug("calling _low_level_execute_command to remove the tmp path") self._low_level_execute_command(cmd, sudoable=False) - display.debug("done removing the tmp path") def _transfer_data(self, remote_path, data): ''' @@ -286,9 +282,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): ''' cmd = self._connection._shell.chmod(mode, path) - display.debug("calling _low_level_execute_command to chmod the remote path") res = self._low_level_execute_command(cmd, sudoable=sudoable) - display.debug("done with chmod call") return res def _remote_checksum(self, path, all_vars): @@ -299,9 +293,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): python_interp = all_vars.get('ansible_python_interpreter', 'python') cmd = self._connection._shell.checksum(path, python_interp) - display.debug("calling _low_level_execute_command to get the remote checksum") data = self._low_level_execute_command(cmd, sudoable=True) - display.debug("done getting the remote checksum") try: data2 = data['stdout'].strip().splitlines()[-1] if data2 == u'': @@ -329,9 +321,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): expand_path = '~%s' % self._play_context.become_user cmd = self._connection._shell.expand_user(expand_path) - display.debug("calling _low_level_execute_command to expand the remote user path") data = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done expanding the remote user path") #initial_fragment = utils.last_non_blank_line(data['stdout']) initial_fragment = data['stdout'].strip().splitlines()[-1] @@ -448,9 +438,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): # specified in the play, not the sudo_user sudoable = False - display.debug("calling _low_level_execute_command() for command %s" % cmd) res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data) - display.debug("_low_level_execute_command returned ok") if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: if self._play_context.become and self._play_context.become_user != 'root': @@ -498,21 +486,20 @@ class ActionBase(with_metaclass(ABCMeta, object)): if executable is not None: cmd = executable 
+ ' -c ' + cmd - display.debug("in _low_level_execute_command() (%s)" % (cmd,)) + display.debug("_low_level_execute_command(): starting") if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) - display.debug("no command, exiting _low_level_execute_command()") + display.debug("_low_level_execute_command(): no command, exiting") return dict(stdout='', stderr='') allow_same_user = C.BECOME_ALLOW_SAME_USER same_user = self._play_context.become_user == self._play_context.remote_user if sudoable and self._play_context.become and (allow_same_user or not same_user): - display.debug("using become for this command") + display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) - display.debug("executing the command %s through the connection" % cmd) + display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.debug("command execution done: rc=%s" % (rc)) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type @@ -530,11 +517,11 @@ class ActionBase(with_metaclass(ABCMeta, object)): else: err = stderr - display.debug("stdout=%s, stderr=%s" % (stdout, stderr)) - display.debug("done with _low_level_execute_command() (%s)" % (cmd,)) if rc is None: rc = 0 + display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) + return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err) def _get_first_available_file(self, faf, of=None, searchdir='files'): diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 4251f8a63e8..a2abcf20aee 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -319,7 +319,7 @@ class Connection(ConnectionBase): Starts the command and communicates with it until it ends. ''' - display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]] + display_cmd = map(pipes.quote, cmd) display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) # Start the given command. 
If we don't need to pipeline data, we can try From bd0f9a4afc8406f71d65c50cda35a43549998fc1 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Dec 2015 21:50:11 -0500 Subject: [PATCH 0094/1113] fix make complaint when git is not installed --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ac4c07f4314..f62cffb2df8 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ GIT_HASH := $(shell git log -n 1 --format="%h") GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g') GITINFO = .$(GIT_HASH).$(GIT_BRANCH) else -GITINFO = '' +GITINFO = "" endif ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1) From 58072c92fb762881679c31d050d519ccd83cb209 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 11 Dec 2015 09:32:19 -0500 Subject: [PATCH 0095/1113] removed 'bare' example in environment now shows how to use explicit templating --- docsite/rst/playbooks_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_environment.rst b/docsite/rst/playbooks_environment.rst index da050f007d5..f909bfcd6e6 100644 --- a/docsite/rst/playbooks_environment.rst +++ b/docsite/rst/playbooks_environment.rst @@ -31,7 +31,7 @@ The environment can also be stored in a variable, and accessed like so:: tasks: - apt: name=cobbler state=installed - environment: proxy_env + environment: "{{proxy_env}}" You can also use it at a playbook level:: From d9e510b19273d6a495e6694b6930e49de80f9500 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 11 Dec 2015 13:12:24 -0500 Subject: [PATCH 0096/1113] narrow down exception catching in block builds this was obscuring other errors and should have always been narrow scope --- lib/ansible/playbook/role/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 1c6b344a4fc..f308954f528 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -150,7 +150,7 @@ class Role(Base, Become, Conditional, Taggable): current_when = getattr(self, 'when')[:] current_when.extend(role_include.when) setattr(self, 'when', current_when) - + current_tags = getattr(self, 'tags')[:] current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) @@ -174,7 +174,7 @@ class Role(Base, Become, Conditional, Taggable): if task_data: try: self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader) - except: + except AssertionError: raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) handler_data = self._load_role_yaml('handlers') From 97554fc222628057d7f3255ce2caac8dfe5d783f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 11 Dec 2015 00:18:47 -0500 Subject: [PATCH 0097/1113] Fixing filter test for extract to use proper group --- test/integration/roles/test_filters/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index af6c5d49def..cb1549d3f78 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -77,4 +77,4 @@ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" - "'local' == 
['localhost']|map('extract',hostvars,'ansible_connection')|list|first" - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" - - "'ungrouped' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" + - "'amazon' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" From 7f7e730dea36dbb709b47c39ca1a28cb9f6cb3f1 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 11 Dec 2015 14:55:44 -0500 Subject: [PATCH 0098/1113] Don't mark hosts failed if they've moved to a rescue portion of a block Fixes #13521 --- lib/ansible/plugins/strategy/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 15636b580d1..91ca4e86383 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -30,6 +30,11 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +<<<<<<< Updated upstream +======= +from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess +>>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -202,8 +207,10 @@ class StrategyBase: [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts] else: iterator.mark_host_failed(host) - self._tqm._failed_hosts[host.name] = True - self._tqm._stats.increment('failures', host.name) + (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) + if state.run_state != PlayIterator.ITERATING_RESCUE: + self._tqm._failed_hosts[host.name] = True + self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) From de71171fc21a81a343eb28ed25472ef4aa17406c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 11 Dec 2015 15:10:48 -0500 Subject: [PATCH 0099/1113] removed merge conflict --- lib/ansible/plugins/strategy/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 91ca4e86383..5d31a3dba8d 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -30,11 +30,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable -<<<<<<< Updated upstream -======= from ansible.executor.play_iterator import PlayIterator -from ansible.executor.process.worker import WorkerProcess ->>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group From ae988ed753f69cb2a7bf115c7cee41e53f01ef3e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 11 Dec 2015 15:35:57 -0500 Subject: [PATCH 0100/1113] avoid set to unique hosts to preserver order swiched to using a list comp and set to still unique but keep expected order fixes #13522 --- lib/ansible/inventory/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 3c1331e7065..95e193f381a 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -194,7 +194,8 @@ class Inventory(object): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + seen = set() + HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)] return HOSTS_PATTERNS_CACHE[pattern_hash][:] From 120b9a7ac6274c54d091291587b0c9ec865905a1 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Dec 2015 18:03:25 -0500 Subject: [PATCH 0101/1113] Changing the way workers are forked --- bin/ansible | 1 + lib/ansible/executor/process/worker.py | 108 ++++++++------------- lib/ansible/executor/task_queue_manager.py | 31 +----- lib/ansible/plugins/strategy/__init__.py | 48 ++++----- lib/ansible/plugins/strategy/linear.py | 5 +- 5 files changed, 70 insertions(+), 123 deletions(-) diff --git a/bin/ansible b/bin/ansible index 7e1aa01a932..627510a72e8 100755 --- a/bin/ansible +++ b/bin/ansible @@ -60,6 +60,7 @@ if __name__ == '__main__': try: display = Display() + display.debug("starting run") sub = None try: diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index a1a83a5ddaa..73f5faa78b6 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -59,14 +59,18 @@ class WorkerProcess(multiprocessing.Process): for reading later. ''' - def __init__(self, tqm, main_q, rslt_q, hostvars_manager, loader): + def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj): super(WorkerProcess, self).__init__() # takes a task queue manager as the sole param: - self._main_q = main_q - self._rslt_q = rslt_q - self._hostvars = hostvars_manager - self._loader = loader + self._rslt_q = rslt_q + self._task_vars = task_vars + self._host = host + self._task = task + self._play_context = play_context + self._loader = loader + self._variable_manager = variable_manager + self._shared_loader_obj = shared_loader_obj # dupe stdin, if we have one self._new_stdin = sys.stdin @@ -97,73 +101,45 @@ class WorkerProcess(multiprocessing.Process): if HAS_ATFORK: atfork() - while True: - task = None - try: - #debug("waiting for work") - (host, task, basedir, zip_vars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get(block=False) + try: + # execute the task and build a TaskResult from the result + debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) + executor_result = TaskExecutor( + self._host, + self._task, + self._task_vars, + self._play_context, + self._new_stdin, + self._loader, + self._shared_loader_obj, + ).run() - if compressed_vars: - job_vars = json.loads(zlib.decompress(zip_vars)) - else: - job_vars = zip_vars + debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, executor_result) - job_vars['hostvars'] = self._hostvars.hostvars() + # put the result on the result queue + debug("sending task result") + self._rslt_q.put(task_result) + debug("done sending task result") - debug("there's work to be done! 
got a task/handler to work on: %s" % task) + except AnsibleConnectionFailure: + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, dict(unreachable=True)) + self._rslt_q.put(task_result, block=False) - # because the task queue manager starts workers (forks) before the - # playbook is loaded, set the basedir of the loader inherted by - # this fork now so that we can find files correctly - self._loader.set_basedir(basedir) - - # Serializing/deserializing tasks does not preserve the loader attribute, - # since it is passed to the worker during the forking of the process and - # would be wasteful to serialize. So we set it here on the task now, and - # the task handles updating parent/child objects as needed. - task.set_loader(self._loader) - - # execute the task and build a TaskResult from the result - debug("running TaskExecutor() for %s/%s" % (host, task)) - executor_result = TaskExecutor( - host, - task, - job_vars, - play_context, - self._new_stdin, - self._loader, - shared_loader_obj, - ).run() - debug("done running TaskExecutor() for %s/%s" % (host, task)) - task_result = TaskResult(host, task, executor_result) - - # put the result on the result queue - debug("sending task result") - self._rslt_q.put(task_result) - debug("done sending task result") - - except queue.Empty: - time.sleep(0.0001) - except AnsibleConnectionFailure: + except Exception as e: + if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound): try: - if task: - task_result = TaskResult(host, task, dict(unreachable=True)) - self._rslt_q.put(task_result, block=False) + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout='')) + self._rslt_q.put(task_result, block=False) except: - break - except Exception as e: - if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound): - break - else: - try: - if task: - task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout='')) - self._rslt_q.put(task_result, block=False) - except: - debug("WORKER EXCEPTION: %s" % e) - debug("WORKER EXCEPTION: %s" % traceback.format_exc()) - break + debug("WORKER EXCEPTION: %s" % e) + debug("WORKER EXCEPTION: %s" % traceback.format_exc()) debug("WORKER PROCESS EXITING") - diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index e2b29a5282c..9189ab95819 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -102,11 +102,7 @@ class TaskQueueManager: for i in xrange(num): main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() - - prc = WorkerProcess(self, main_q, rslt_q, self._hostvars_manager, self._loader) - prc.start() - - self._workers.append((prc, main_q, rslt_q)) + self._workers.append([None, main_q, rslt_q]) self._result_prc = ResultProcess(self._final_q, self._workers) self._result_prc.start() @@ -195,31 +191,12 @@ class TaskQueueManager: new_play = play.copy() new_play.post_validate(templar) - class HostVarsManager(SyncManager): - pass - - hostvars = HostVars( + self.hostvars = HostVars( inventory=self._inventory, variable_manager=self._variable_manager, loader=self._loader, ) - HostVarsManager.register( - 'hostvars', - callable=lambda: hostvars, - # FIXME: this is the list of exposed methods to the DictProxy object, plus our - # special ones 
(set_variable_manager/set_inventory). There's probably a better way - # to do this with a proper BaseProxy/DictProxy derivative - exposed=( - 'set_variable_manager', 'set_inventory', '__contains__', '__delitem__', - 'set_nonpersistent_facts', 'set_host_facts', 'set_host_variable', - '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', - 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' - ), - ) - self._hostvars_manager = HostVarsManager() - self._hostvars_manager.start() - # Fork # of forks, # of hosts or serial, whichever is lowest contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))] contenders = [ v for v in contenders if v is not None and v > 0 ] @@ -259,7 +236,6 @@ class TaskQueueManager: # and run the play using the strategy and cleanup on way out play_return = strategy.run(iterator, play_context) self._cleanup_processes() - self._hostvars_manager.shutdown() return play_return def cleanup(self): @@ -275,7 +251,8 @@ class TaskQueueManager: for (worker_prc, main_q, rslt_q) in self._workers: rslt_q.close() main_q.close() - worker_prc.terminate() + if worker_prc and worker_prc.is_alive(): + worker_prc.terminate() def clear_failed_hosts(self): self._failed_hosts = dict() diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 5d31a3dba8d..ea30b800b02 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -31,6 +31,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -138,38 +139,29 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) try: display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers))) - (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] - self._cur_worker += 1 - if self._cur_worker >= len(self._workers): - self._cur_worker = 0 - # create a dummy object with plugin loaders set as an easier # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() - # compress (and convert) the data if so configured, which can - # help a lot when the variable dictionary is huge. 
We pop the - # hostvars out of the task variables right now, due to the fact - # that they're not JSON serializable - compressed_vars = False - if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0: - zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL) - compressed_vars = True - # we're done with the original dict now, so delete it to - # try and reclaim some memory space, which is helpful if the - # data contained in the dict is very large - del task_vars - else: - zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above) - - # and queue the task - main_q.put((host, task, self._loader.get_basedir(), zip_vars, compressed_vars, play_context, shared_loader_obj)) + while True: + (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + if worker_prc is None or not worker_prc.is_alive(): + worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) + self._workers[self._cur_worker][0] = worker_prc + worker_prc.start() + break + self._cur_worker += 1 + if self._cur_worker >= len(self._workers): + self._cur_worker = 0 + time.sleep(0.0001) + del task_vars self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: # most likely an abort @@ -177,7 +169,7 @@ class StrategyBase: return display.debug("exiting _queue_task() for %s/%s" % (host, task)) - def _process_pending_results(self, iterator): + def _process_pending_results(self, iterator, one_pass=False): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). @@ -247,13 +239,11 @@ class StrategyBase: new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'add_group': host = result[1] result_item = result[2] self._add_group(host, result_item) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'notify_handler': task_result = result[1] @@ -283,7 +273,6 @@ class StrategyBase: for target_host in host_list: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] @@ -316,21 +305,22 @@ class StrategyBase: for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) - self._tqm._hostvars_manager.hostvars().set_host_variable(target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] if task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(actual_host, facts) else: self._variable_manager.set_host_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_host_facts(actual_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) + except Queue.Empty: time.sleep(0.0001) + if one_pass: + break + return ret_results def _wait_on_pending_results(self, iterator): diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8a8d5c084af..8c94267cf46 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -169,6 +169,7 @@ class StrategyModule(StrategyBase): skip_rest = False 
choose_step = True + results = [] for (host, task) in host_tasks: if not task: continue @@ -243,12 +244,14 @@ class StrategyModule(StrategyBase): if run_once: break + results += self._process_pending_results(iterator, one_pass=True) + # go to next host/task group if skip_rest: continue display.debug("done queuing things up, now waiting for results queue to drain") - results = self._wait_on_pending_results(iterator) + results += self._wait_on_pending_results(iterator) host_results.extend(results) if not work_to_do and len(iterator.get_failed_hosts()) > 0: From 8db291274519331ed186f0b9dc0711f6754cb25d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 12:59:00 -0500 Subject: [PATCH 0102/1113] corrected section anchors --- docsite/rst/developing_releases.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst index 1eeb2421210..2332459c30d 100644 --- a/docsite/rst/developing_releases.rst +++ b/docsite/rst/developing_releases.rst @@ -4,7 +4,7 @@ Releases .. contents:: Topics :local: -.. schedule:: +.. _schedule: Release Schedule ```````````````` @@ -16,7 +16,7 @@ When a fix/feature gets added to the `devel` branch it will be part of the next Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. -.. methods:: +.. _methods: Release methods ```````````````` @@ -25,7 +25,7 @@ Ansible normally goes through a 'release candidate', issuing an RC1 for a releas Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. -.. freezing:: +.. _freezing: Release feature freeze `````````````````````` From 0a112a1b0617d4087ae3e46ea031101af204d48e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 13:14:14 -0500 Subject: [PATCH 0103/1113] fixed formating issues with rst --- docsite/rst/porting_guide_2.0.rst | 44 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 9c26a4b1611..8d69ecd4403 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -56,12 +56,11 @@ uses key=value escaping which has not changed. The other option is to check for "msg": "Testing some things" * porting task includes - * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. - * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 - * variable precedence +* More dynamic. Corner-case formats that were not supposed to work now do not, as expected. +* variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 * templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. - If you need the old behavior, quote the value to pass it around as a string. - Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + If you need the old behavior, quote the value to pass it around as a string. 
+* Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. * dnf module has been rewritten. Some minor changes in behavior may be observed. @@ -72,26 +71,26 @@ Deprecated While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). -* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* Bare variables in `with_` loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. * The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. -* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Undefined variables within a `with_` loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. * Using variables for task parameters is unsafe and will be removed in a future version. For example:: - hosts: localhost - gather_facts: no - vars: - debug_params: - msg: "hello there" - tasks: - - debug: "{{debug_params}}" + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" * Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. * Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. * Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. -* The “short form” for vars_prompt is no longer supported. -For example:: +* The “short form” for vars_prompt is no longer supported. + For example:: -vars_prompt: + vars_prompt: variable_name: "Prompt string" * Specifying variables at the top level of a task include statement is no longer supported. For example:: @@ -101,21 +100,21 @@ vars_prompt: Should now be:: -- include: foo.yml - args: - a: 1 + - include: foo.yml + args: + a: 1 * Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. * Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. * Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. -For example:: + For example:: - include: foo.yml tags=a,b,c -Should be:: + Should be:: - include: foo.yml - tags: [a, b, c] + tags: [a, b, c] * The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. @@ -125,7 +124,6 @@ Porting plugins In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. 
This way the custom plugin does not need to contain methods which are not customized. -.. note:: Lookup plugins -------------- From d7b516f75dc879ad350b285e7ddc398418bf85fd Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 13:16:40 -0500 Subject: [PATCH 0104/1113] added releases doc --- docsite/rst/developing.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/developing.rst b/docsite/rst/developing.rst index 2a258993019..c5a1dca0611 100644 --- a/docsite/rst/developing.rst +++ b/docsite/rst/developing.rst @@ -11,6 +11,7 @@ Learn how to build modules of your own in any language, and also how to extend A developing_modules developing_plugins developing_test_pr + developing_releases Developers will also likely be interested in the fully-discoverable in :doc:`tower`. It's great for embedding Ansible in all manner of applications. From 8e445c551a23f52e901c9b1d2603e496a2e88c11 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 13:43:10 -0500 Subject: [PATCH 0105/1113] removed unused imports in galaxy/cli --- lib/ansible/cli/galaxy.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 01e0475b24b..0f9074da935 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -25,7 +25,6 @@ __metaclass__ = type import os.path import sys import yaml -import json import time from collections import defaultdict @@ -40,7 +39,6 @@ from ansible.galaxy.role import GalaxyRole from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement -from ansible.module_utils.urls import open_url try: from __main__ import display @@ -61,10 +59,10 @@ class GalaxyCLI(CLI): "remove": "delete a role from your roles path", "search": "query the Galaxy API", "setup": "add a TravisCI integration to Galaxy", - } + } SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + def __init__(self, args): self.VALID_ACTIONS = self.available_commands.keys() self.VALID_ACTIONS.sort() @@ -101,7 +99,7 @@ class GalaxyCLI(CLI): usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions @@ -131,7 +129,7 @@ class GalaxyCLI(CLI): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": @@ -190,7 +188,7 @@ class GalaxyCLI(CLI): # if not offline, get connect to galaxy api if self.action in ("import","info","install","search","login","setup","delete") or \ - (self.action == 'init' and not self.options.offline): + (self.action == 'init' and not self.options.offline): self.api = GalaxyAPI(self.galaxy) self.execute() @@ -544,7 +542,7 @@ class GalaxyCLI(CLI): def execute_search(self): page_size = 1000 search = None - + if len(self.args): terms = [] for i in range(len(self.args)): @@ -556,7 +554,7 @@ class GalaxyCLI(CLI): response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.tags, author=self.options.author, page_size=page_size) - + if response['count'] == 0: display.display("No roles match your search.", color="yellow") return True @@ -578,7 +576,7 @@ class GalaxyCLI(CLI): data += (format_str % ("----", "-----------")) for role in response['results']: data += (format_str % (role['username'] + '.' + role['name'],role['description'])) - + self.pager(data) return True @@ -595,12 +593,12 @@ class GalaxyCLI(CLI): github_token = self.options.token galaxy_response = self.api.authenticate(github_token) - + if self.options.token is None: # Remove the token we created login.remove_github_token() - - # Store the Galaxy token + + # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) @@ -611,7 +609,7 @@ class GalaxyCLI(CLI): """ Import a role into Galaxy """ - + colors = { 'INFO': 'normal', 'WARNING': 'yellow', @@ -631,7 +629,7 @@ class GalaxyCLI(CLI): else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) - + if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), @@ -693,7 +691,7 @@ class GalaxyCLI(CLI): if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") return 0 - + secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() @@ -711,7 +709,7 @@ class GalaxyCLI(CLI): if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. 
Expected: github_user github_repo") - + github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) @@ -722,9 +720,8 @@ class GalaxyCLI(CLI): display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) - + display.display(resp['status']) return True - From 3c4d2fc6f2cdeba074511fb591134014cf77032d Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sat, 12 Dec 2015 19:31:19 +0100 Subject: [PATCH 0106/1113] Add tests for ansible.module_utils.known_hosts --- .../module_utils/basic/test_known_hosts.py | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 test/units/module_utils/basic/test_known_hosts.py diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py new file mode 100644 index 00000000000..952184bfec9 --- /dev/null +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Michael Scherer <mscherer@redhat.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +from ansible.compat.tests import unittest +from ansible.module_utils import known_hosts + +class TestAnsibleModuleKnownHosts(unittest.TestCase): + urls = { + 'ssh://one.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'one.example.org'}, + 'ssh+git://two.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'two.example.org'}, + 'rsync://three.example.org/user/example.git': + {'is_ssh_url': False, 'get_fqdn': 'three.example.org'}, + 'git@four.example.org:user/example.git': + {'is_ssh_url': True, 'get_fqdn': 'four.example.org'}, + 'git+ssh://five.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, + 'ssh://six.example.org:21/example.org': + {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + } + + def test_is_ssh_url(self): + for u in self.urls: + self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url']) + + def test_get_fqdn(self): + for u in self.urls: + self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn']) + + + From 99e46440bdaf622958f78cebecb52dec7ed67669 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 16:10:18 -0500 Subject: [PATCH 0107/1113] changed shell delimiters for csh fixes #13459 --- lib/ansible/plugins/shell/csh.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py index 1c383d133c6..bd210f12feb 100644 --- a/lib/ansible/plugins/shell/csh.py +++ b/lib/ansible/plugins/shell/csh.py @@ -24,6 +24,8 @@ class ShellModule(ShModule): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\\\n' _SHELL_REDIRECT_ALLNULL = '>& /dev/null' + _SHELL_SUB_LEFT = '"`' + _SHELL_SUB_RIGHT = '`"' def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) From f3bedbae2991b540421d64f5be942ec7c84fdf7d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 12 Dec 2015 17:50:55 -0500 Subject: [PATCH 0108/1113] simplified skippy thanks agaffney! --- lib/ansible/plugins/callback/skippy.py | 159 +------------------------ 1 file changed, 6 insertions(+), 153 deletions(-) diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 495943417fd..306d1a534e5 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -19,10 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible import constants as C -from ansible.plugins.callback import CallbackBase +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default -class CallbackModule(CallbackBase): +class CallbackModule(CallbackModule_default): ''' This is the default callback interface, which simply prints messages @@ -33,154 +32,8 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'skippy' - def v2_runner_on_failed(self, result, ignore_errors=False): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') - - def v2_runner_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % (self._dump_results(result._result),) - self._display.display(msg, color=color) - - self._handle_warnings(result._result) - - def v2_runner_on_unreachable(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') - - def v2_playbook_on_no_hosts_remaining(self): - self._display.banner("NO MORE HOSTS LEFT") - - def v2_playbook_on_task_start(self, task, is_conditional): - self._display.banner("TASK [%s]" % task.get_name().strip()) - if self._display.verbosity > 2: - path = task.get_path() - if path: - self._display.display("task path: %s" % path, color='dark gray') - - def v2_playbook_on_cleanup_task_start(self, task): - self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) - - def v2_playbook_on_handler_task_start(self, task): - self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - - def v2_playbook_on_play_start(self, play): - name = play.get_name().strip() - if not name: - msg = "PLAY" - else: - msg = "PLAY [%s]" % name - - self._display.banner(msg) - - def v2_on_file_diff(self, result): - if result._task.loop and 'results' in result._result: - for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = self._get_item(res) - newres._result = res - - self.v2_on_file_diff(newres) - elif 'diff' in result._result and result._result['diff']: - self._display.display(self._get_diff(result._result['diff'])) - - def v2_playbook_item_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - msg += " => (item=%s)" % (result._result['item'],) - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color=color) - - def v2_playbook_item_on_failed(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') - else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') - - self._handle_warnings(result._result) - - def v2_playbook_on_include(self, included_file): - msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + def v2_runner_on_skipped(self, result): + pass + def v2_playbook_item_on_skipped(self, result): + pass From d73562902b289e7fd7e2e5a37e82b00c83a16369 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 13 Dec 2015 00:13:13 -0500 Subject: [PATCH 0109/1113] debug now validates its params simplified var handling made default message the same as in pre 2.0 fixes #13532 --- lib/ansible/plugins/action/debug.py | 35 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a0ffb714044..2af20eddfc4 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -20,40 +20,45 @@ __metaclass__ = type from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.unicode import to_unicode +from ansible.errors import AnsibleUndefinedVariable class ActionModule(ActionBase): ''' Print statements during execution ''' TRANSFERS_FILES = False + VALID_ARGS = set(['msg', 'var']) def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() + for arg in self._task.args: + if arg not in self.VALID_ARGS: + return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg} + + if 'msg' in self._task.args and 'var' in self._task.args: + return {"failed": True, "msg": "'msg' and 'var' are incompatible options"} + result = super(ActionModule, self).run(tmp, task_vars) if 'msg' in self._task.args: - if 'fail' in self._task.args and boolean(self._task.args['fail']): - result['failed'] = True - result['msg'] = self._task.args['msg'] - else: - result['msg'] = self._task.args['msg'] - # FIXME: move the LOOKUP_REGEX somewhere else - elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) + result['msg'] = self._task.args['msg'] + + elif 'var' in self._task.args: + try: + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True) + if results == self._task.args['var']: + raise AnsibleUndefinedVariable + except AnsibleUndefinedVariable: + results = "VARIABLE IS NOT DEFINED!" 
+ if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: - # If var name is same as result, try to template it - if results == self._task.args['var']: - try: - results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) - except: - results = "VARIABLE IS NOT DEFINED!" result[self._task.args['var']] = results else: - result['msg'] = 'here we are' + result['msg'] = 'Hello world!' # force flag to make debug output module always verbose result['_ansible_verbose_always'] = True From e2ad4fe9100729462fbd511c75a035ccdfd41841 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 13 Dec 2015 00:34:23 -0500 Subject: [PATCH 0110/1113] include all packaging in tarball not juse rpm spec file --- MANIFEST.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index d8402f0297f..64c5bf1fcba 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,12 +4,13 @@ prune ticket_stubs prune packaging prune test prune hacking -include README.md packaging/rpm/ansible.spec COPYING +include README.md COPYING include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * recursive-include docs * +recursive-include packaging * include Makefile include VERSION include MANIFEST.in From 4779f29777872f1352c65ea504eb81e998a47b7b Mon Sep 17 00:00:00 2001 From: Usman Ehtesham Gul <uehtesham90@gmail.com> Date: Sun, 13 Dec 2015 01:24:27 -0500 Subject: [PATCH 0111/1113] Fix Doc mistake Fix Doc mistake in ansible/docsite/rst/playbooks_variables.rst --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 307387a72e5..122c0ef9232 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -796,7 +796,7 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t .. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. .. [2] Variables defined in inventory file or provided by dynamic inventory. -.. note:: Within a any section, redefining a var will overwrite the previous instance. +.. note:: Within any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. If you define a variable twice in a play's vars: section, the 2nd one wins. .. note:: the previous describes the default config `hash_behavior=replace`, switch to 'merge' to only partially overwrite. 
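
(Editorial aside: to make the replace-versus-merge distinction in the note above concrete, the following is a minimal, self-contained Python sketch of the two variable-combining behaviours the documentation describes. It is an illustration of the documented semantics only, not Ansible's actual variable-merging code; the function names and sample data are hypothetical.)::

    def combine_replace(a, b):
        # replace behaviour (the documented default): the later definition of a
        # variable wins outright, so nested keys from 'a' are discarded.
        result = dict(a)
        result.update(b)
        return result

    def combine_merge(a, b):
        # merge behaviour: dictionaries are merged recursively, so only the keys
        # that 'b' actually redefines overwrite those in 'a'.
        result = dict(a)
        for key, value in b.items():
            if isinstance(value, dict) and isinstance(result.get(key), dict):
                result[key] = combine_merge(result[key], value)
            else:
                result[key] = value
        return result

    group_vars = {'app': {'port': 8080, 'debug': False}}
    host_vars = {'app': {'debug': True}}

    print(combine_replace(group_vars, host_vars))  # {'app': {'debug': True}}
    print(combine_merge(group_vars, host_vars))    # {'app': {'port': 8080, 'debug': True}}
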
From 1b2ebe8defddbb6f6cd471f999d6eba8b78f1446 Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Sun, 13 Dec 2015 10:56:47 +0100 Subject: [PATCH 0112/1113] make shallow clone the default for ansibel-pull --- lib/ansible/cli/pull.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 67e89259303..7b2fd13e5ee 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,8 +80,8 @@ class PullCLI(CLI): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') - self.parser.add_option('--depth', dest='depth', default=None, - help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') + self.parser.add_option('--full', dest='fullclone', action='store_true', + help='Do a full clone, instead of a shallow one.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -157,8 +157,8 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' - if self.options.depth: - repo_opts += ' depth=%s' % self.options.depth + if not self.options.fullclone: + repo_opts += ' depth=1' path = module_loader.find_plugin(self.options.module_name) From 1bd8d97093f30e4848640a5c43a7f830a9112e2f Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Sun, 13 Dec 2015 11:19:50 +0100 Subject: [PATCH 0113/1113] fix whitespace --- lib/ansible/cli/pull.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 7b2fd13e5ee..2571717766e 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -156,7 +156,7 @@ class PullCLI(CLI): if self.options.verify: repo_opts += ' verify_commit=yes' - + if not self.options.fullclone: repo_opts += ' depth=1' From d8e6bc98a2494628aca2fc406655dce70701f525 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 17:09:34 -0500 Subject: [PATCH 0114/1113] Fix overloaded options. Show an error when no action given. Don't show a helpful list of commands and descriptions. 
--- lib/ansible/cli/galaxy.py | 68 ++++++++------------------------------- 1 file changed, 13 insertions(+), 55 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 0f9074da935..13df7c41220 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -48,50 +48,14 @@ except ImportError: class GalaxyCLI(CLI): - available_commands = { - "delete": "remove a role from Galaxy", - "import": "add a role contained in a GitHub repo to Galaxy", - "info": "display details about a particular role", - "init": "create a role directory structure in your roles path", - "install": "download a role into your roles path", - "list": "enumerate roles found in your roles path", - "login": "authenticate with Galaxy API and store the token", - "remove": "delete a role from your roles path", - "search": "query the Galaxy API", - "setup": "add a TravisCI integration to Galaxy", - } - SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + def __init__(self, args): - self.VALID_ACTIONS = self.available_commands.keys() - self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) - def set_action(self): - """ - Get the action the user wants to execute from the sys argv list. - """ - for i in range(0,len(self.args)): - arg = self.args[i] - if arg in self.VALID_ACTIONS: - self.action = arg - del self.args[i] - break - - if not self.action: - self.show_available_actions() - - def show_available_actions(self): - # list available commands - display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") - display.display(u'\n' + "availabe commands:" + u'\n\n') - for key in self.VALID_ACTIONS: - display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) - display.display(' ') - def parse(self): ''' create an options parser for bin/ansible ''' @@ -107,11 +71,11 @@ class GalaxyCLI(CLI): self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") - self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') - self.parser.add_option('-b', '--branch', dest='reference', + self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') - self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") @@ -147,15 +111,14 @@ class GalaxyCLI(CLI): help='GitHub username') self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") elif self.action == "setup": - self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + - u'\n\n' + "Create an integration with travis.") - self.parser.add_option('-r', '--remove', dest='remove_id', default=None, + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") + self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.') - self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("config","import","init","login","setup"): + if not self.action in ("import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. ' 'The default is the roles_path configured in your ' @@ -171,19 +134,14 @@ class GalaxyCLI(CLI): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - if self.action: - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): - - if not self.action: - return True - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api From 989604b1a3977e6246f997d1a75aaf97776b28ae Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 17:12:53 -0500 Subject: [PATCH 0115/1113] Fix typo. --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e456..c9dea273367 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From bc7392009069749042bf937eb315ea19c513d0ff Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 18:28:57 -0500 Subject: [PATCH 0116/1113] Updated ansible-galaxy man page. Removed -b option for import. 
--- docs/man/man1/ansible-galaxy.1.asciidoc.in | 202 ++++++++++++++++++++- lib/ansible/cli/galaxy.py | 4 +- 2 files changed, 201 insertions(+), 5 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index e6f2d0b4568..44f0b46b085 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -12,7 +12,7 @@ ansible-galaxy - manage roles using galaxy.ansible.com SYNOPSIS -------- -ansible-galaxy [init|info|install|list|remove] [--help] [options] ... +ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION @@ -20,7 +20,7 @@ DESCRIPTION *Ansible Galaxy* is a shared repository for Ansible roles. The ansible-galaxy command can be used to manage these roles, -or by creating a skeleton framework for roles you'd like to upload to Galaxy. +or for creating a skeleton framework for roles you'd like to upload to Galaxy. COMMON OPTIONS -------------- @@ -29,7 +29,6 @@ COMMON OPTIONS Show a help message related to the given sub-command. - INSTALL ------- @@ -145,6 +144,203 @@ The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) +SEARCH +------ + +The *search* sub-command returns a filtered list of roles found at +galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy search [options] [searchterm1 searchterm2] + + +OPTIONS +~~~~~~~ +*--galaxy-tags*:: + +Provide a comma separated list of Galaxy Tags on which to filter. + +*--platforms*:: + +Provide a comma separated list of Platforms on which to filter. + +*--author*:: + +Specify the username of a Galaxy contributor on which to filter. + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +INFO +---- + +The *info* sub-command shows detailed information for a specific role. +Details returned about the role included information from the local copy +as well as information from galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy info [options] role_name[, version] + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +LOGIN +----- + +The *login* sub-command is used to authenticate with galaxy.ansible.com. +Authentication is required to use the import, delete and setup commands. +It will authenticate the user,retrieve a token from Galaxy, and store it +in the user's home directory. + +USAGE +~~~~~ + +$ ansible-galaxy login [options] + +The *login* sub-command prompts for a *GitHub* username and password. It does +NOT send your password to Galaxy. It actually authenticates with GitHub and +creates a personal access token. It then sends the personal access token to +Galaxy, which in turn verifies that you are you and returns a Galaxy access +token. After authentication completes the *GitHub* personal access token is +destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor +authentication enabled with GitHub, use the *--github-token* option to pass a +personal access token that you create. 
Log into GitHub, go to Settings and +click on Personal Access Token to create a token. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--github-token*:: + +Authenticate using a *GitHub* personal access token rather than a password. + + +IMPORT +------ + +Import a role from *GitHub* to galaxy.ansible.com. Requires the user first +authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy import [options] github_user github_repo + +OPTIONS +~~~~~~~ +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--branch*:: + +Provide a specific branch to import. When a branch is not specified the +branch found in meta/main.yml is used. If no branch is specified in +meta/main.yml, the repo's default branch (usually master) is used. + + +DELETE +------ + +The *delete* sub-command will delete a role from galaxy.ansible.com. Requires +the user first authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy delete [options] github_user github_repo + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +SETUP +----- + +The *setup* sub-command creates an integration point for *Travis CI*, enabling +galaxy.ansible.com to receive notifications from *Travis* on build completion. +Requires the user first authenticate with galaxy.ansible.com using the *login* +subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy setup [options] source github_user github_repo secret + +* Use *travis* as the source value. In the future additional source values may + be added. + +* Provide your *Travis* user token as the secret. The token is not stored by + galaxy.ansible.com. A hash is created using github_user, github_repo + and your token. The hash value is what actually gets stored. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +--list:: + +Show your configured integrations. Provids the ID of each integration +which can be used with the remove option. + +--remove:: + +Remove a specific integration. Provide the ID of the integration to +be removed. + AUTHOR ------ diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 13df7c41220..1cd936d028e 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -100,7 +100,7 @@ class GalaxyCLI(CLI): self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") - self.parser.add_option('-g','--github-token', dest='token', default=None, + self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', @@ -118,7 +118,7 @@ class GalaxyCLI(CLI): help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("import","init","login","setup"): + if not self.action in ("delete","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' From f1c72ff8f51b749165d5bc4089ca8c8fd5b22789 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 22:04:00 -0500 Subject: [PATCH 0117/1113] Make sure it is clear that new commands require using the Galaxy 2.0 Beta site. --- docsite/rst/galaxy.rst | 58 +++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea273367..3a12044ca9e 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -1,7 +1,7 @@ Ansible Galaxy ++++++++++++++ -"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool that helps work with roles. +"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool for managing and creating roles. .. contents:: Topics @@ -10,24 +10,36 @@ The Website The website `Ansible Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. -You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. +Access the Galaxy web site using GitHub OAuth, and to install roles use the 'ansible-galaxy' command line tool included in Ansible 1.4.2 and later. Read the "About" page on the Galaxy site for more information. The ansible-galaxy command line tool ```````````````````````````````````` -The command line ansible-galaxy has many different subcommands. +The ansible-galaxy command has many different sub-commands for managing roles both locally and at `galaxy.ansible.com <https://galaxy.ansible.com>`_. + +.. note:: + + The search, login, import, delete, and setup commands in the Ansible 2.0 version of ansible-galaxy require access to the + 2.0 Beta release of the Galaxy web site available at `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. + + Use the ``--server`` option to access the beta site. For example:: + + $ ansible-galaxy search --server https://galaxy-qa.ansible.com mysql --author geerlingguy + + Additionally, you can define a server in ansible.cfg:: + + [galaxy] + server=https://galaxy-qa.ansible.com Installing Roles ---------------- -The most obvious is downloading roles from the Ansible Galaxy website:: +The most obvious use of the ansible-galaxy command is downloading roles from `the Ansible Galaxy website <https://galaxy.ansible.com>`_:: $ ansible-galaxy install username.rolename -.. _galaxy_cli_roles_path: - roles_path =============== @@ -169,7 +181,9 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The format of results pictured here is new in Ansible 2.0. + The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Get More Information About a Role --------------------------------- @@ -213,10 +227,6 @@ This returns everything found in Galaxy for the role: version: watchers_count: 1 -.. note:: - - The format of results pictured here is new in Ansible 2.0. 
- List Installed Roles -------------------- @@ -262,7 +272,13 @@ To use the import, delete and setup commands authentication with Galaxy is requi As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. -If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +.. note:: + + The login command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Import a Role ------------- @@ -298,7 +314,9 @@ If the --no-wait option is present, the command will not wait for results. Resul .. note:: - The import command is only available in Ansible 2.0. + The import command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Delete a Role ------------- @@ -307,13 +325,15 @@ Remove a role from the Galaxy web site using the delete command. You can delete :: - ansible-galaxy delete github_user github_repo + $ ansible-galaxy delete github_user github_repo This only removes the role from Galaxy. It does not impact the actual GitHub repo. .. note:: - The delete command is only available in Ansible 2.0. + The delete command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Setup Travis Integerations -------------------------- @@ -324,7 +344,7 @@ Using the setup command you can enable notifications from `travis <http://travis $ ansible-galaxy setup travis github_user github_repo xxxtravistokenxxx - Added integration for travis chouseknecht/ansible-role-sendmail + Added integration for travis github_user/github_repo The setup command requires your Travis token. The Travis token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. @@ -339,7 +359,9 @@ When you create your .travis.yml file add the following to cause Travis to notif .. note:: - The setup command is only available in Ansible 2.0. + The setup command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. 
You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. List Travis Integrtions @@ -361,7 +383,7 @@ Use the --list option to display your Travis integrations: Remove Travis Integrations ========================== -Use the --remove option to disable a Travis integration: +Use the --remove option to disable and remove a Travis integration: :: From 342dee0023e2c6fd6d361a70fec621c09b833915 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 22:56:54 -0500 Subject: [PATCH 0118/1113] Define and handle ignore_certs correctly. Preserve search term order. Tweak to Galaxy docsite. --- docsite/rst/galaxy.rst | 2 +- lib/ansible/cli/galaxy.py | 8 ++++---- lib/ansible/galaxy/api.py | 18 ++++++++---------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 3a12044ca9e..200fdfd5750 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -41,7 +41,7 @@ The most obvious use of the ansible-galaxy command is downloading roles from `th $ ansible-galaxy install username.rolename roles_path -=============== +========== You can specify a particular directory where you want the downloaded roles to be placed:: diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 1cd936d028e..a4a7b915f36 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -127,7 +127,7 @@ class GalaxyCLI(CLI): if self.action in ("import","info","init","install","login","search","setup","delete"): self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') - self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, + self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False, help='Ignore SSL certificate validation errors.') if self.action in ("init","install"): @@ -505,7 +505,7 @@ class GalaxyCLI(CLI): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) - search = '+'.join(terms) + search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.tags and not self.options.author: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") @@ -520,9 +520,9 @@ class GalaxyCLI(CLI): data = '' if response['count'] > page_size: - data += ("Found %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + data += ("\nFound %d roles matching your search. 
Showing first %s.\n" % (response['count'], page_size)) else: - data += ("Found %d roles matching your search:\n" % response['count']) + data += ("\nFound %d roles matching your search:\n" % response['count']) max_len = [] for role in response['results']: diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index c1bf2c4ed50..eec9ee932e0 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -48,16 +48,15 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] def __init__(self, galaxy): - self.galaxy = galaxy self.token = GalaxyToken() self._api_server = C.GALAXY_SERVER - self._validate_certs = C.GALAXY_IGNORE_CERTS + self._validate_certs = not C.GALAXY_IGNORE_CERTS # set validate_certs - if galaxy.options.validate_certs == False: + if galaxy.options.ignore_certs: self._validate_certs = False - display.vvv('Check for valid certs: %s' % self._validate_certs) + display.vvv('Validate TLS certificates: %s' % self._validate_certs) # set the API server if galaxy.options.api_server != C.GALAXY_SERVER: @@ -65,14 +64,13 @@ class GalaxyAPI(object): display.vvv("Connecting to galaxy_server: %s" % self._api_server) server_version = self.get_server_api_version() - - if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (self._api_server, server_version) - self.version = server_version # for future use - display.vvv("Base API: %s" % self.baseurl) - else: + if not server_version in self.SUPPORTED_VERSIONS: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) + self.version = server_version # for future use + display.vvv("Base API: %s" % self.baseurl) + def __auth_header(self): token = self.token.get() if token is None: From 847f454bccb6ec3942ff5d652db7dd1db4d77159 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Wed, 9 Dec 2015 23:25:23 -0500 Subject: [PATCH 0119/1113] Add a section to intro_configuration for Galaxy. --- docsite/rst/intro_configuration.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index dda07fc4506..0ad54938d08 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -897,3 +897,19 @@ The normal behaviour is for operations to copy the existing context or use the u The default list is: nfs,vboxsf,fuse,ramfs:: special_context_filesystems = nfs,vboxsf,fuse,ramfs,myspecialfs + +Galaxy Settings +--------------- + +The following options can be set in the [galaxy] section of ansible.cfg: + +server +====== + +Override the default Galaxy server value of https://galaxy.ansible.com. + +ignore_certs +============ + +If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate +. \ No newline at end of file From 06dde0d332d88e958ac5489bea88f0f5bc536e1b Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Thu, 10 Dec 2015 10:57:48 -0500 Subject: [PATCH 0120/1113] Fixed documentation typos and bits that needed clarification. Fixed missing spaces in VALID_ACTIONS. 
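For reference, the ``[galaxy]`` settings documented in the patches above amount to an ansible.cfg snippet along these lines (the values shown are illustrative only and are not part of any patch)::

    [galaxy]
    # Point ansible-galaxy at an alternate server, e.g. the Galaxy 2.0 Beta site,
    # instead of the default https://galaxy.ansible.com
    server = https://galaxy-qa.ansible.com
    # Skip TLS certificate validation, handy when testing against a self-signed cert
    ignore_certs = yes
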
--- docs/man/man1/ansible-galaxy.1.asciidoc.in | 19 ++++++++++--------- docsite/rst/galaxy.rst | 4 ++-- docsite/rst/intro_configuration.rst | 4 ++-- lib/ansible/cli/galaxy.py | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index 44f0b46b085..9ffe65e45a7 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -147,8 +147,9 @@ configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) SEARCH ------ -The *search* sub-command returns a filtered list of roles found at -galaxy.ansible.com. +The *search* sub-command returns a filtered list of roles found on the remote +server. + USAGE ~~~~~ @@ -170,7 +171,7 @@ Provide a comma separated list of Platforms on which to filter. Specify the username of a Galaxy contributor on which to filter. -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -199,7 +200,7 @@ OPTIONS The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -213,7 +214,7 @@ LOGIN The *login* sub-command is used to authenticate with galaxy.ansible.com. Authentication is required to use the import, delete and setup commands. -It will authenticate the user,retrieve a token from Galaxy, and store it +It will authenticate the user, retrieve a token from Galaxy, and store it in the user's home directory. USAGE @@ -236,7 +237,7 @@ click on Personal Access Token to create a token. OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -262,7 +263,7 @@ $ ansible-galaxy import [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -291,7 +292,7 @@ $ ansible-galaxy delete [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -323,7 +324,7 @@ $ ansible-galaxy setup [options] source github_user github_repo secret OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 200fdfd5750..f8cde57e62c 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -364,8 +364,8 @@ When you create your .travis.yml file add the following to cause Travis to notif section of your ansible.cfg file. -List Travis Integrtions -======================= +List Travis Integrations +======================== Use the --list option to display your Travis integrations: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 0ad54938d08..ccfb456ed93 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -906,10 +906,10 @@ The following options can be set in the [galaxy] section of ansible.cfg: server ====== -Override the default Galaxy server value of https://galaxy.ansible.com. +Override the default Galaxy server value of https://galaxy.ansible.com. Useful if you have a hosted version of the Galaxy web app or want to point to the testing site https://galaxy-qa.ansible.com. It does not work against private, hosted repos, which Galaxy can use for fetching and installing roles. 
ignore_certs ============ If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate -. \ No newline at end of file +. diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index a4a7b915f36..34afa03c9f7 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -49,7 +49,7 @@ except ImportError: class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") def __init__(self, args): self.api = None From 95785f149d21badaf7cba35b4ffa7ed5805235d4 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouse@ansible.com> Date: Thu, 10 Dec 2015 21:44:03 -0500 Subject: [PATCH 0121/1113] Fix docs. The search command works with both galaxy.ansible.com and galaxy-qa.ansible.com. --- docsite/rst/galaxy.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index f8cde57e62c..6d64a542b4a 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -181,9 +181,7 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access - `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy] - section of your ansible.cfg file. + The format of results pictured here is new in Ansible 2.0. Get More Information About a Role --------------------------------- From 2bc3683d41b307611a03447e9d4b194ba6ef5c1c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 13 Dec 2015 05:54:57 -0800 Subject: [PATCH 0122/1113] Restore comment about for-else since it is an uncommon idiom --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 154404e474c..254bab476bb 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -119,7 +119,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type) if module_path: break - else: + else: # This is a for-else: http://bit.ly/1ElPkyg # Use Windows version of ping module to check module paths when # using a connection that supports .ps1 suffixes. We check specifically # for win_ping here, otherwise the code would look for ping.ps1 From 0c954bd14298a81be4c9026563326a87f9c42f58 Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Sun, 13 Dec 2015 18:00:54 +0100 Subject: [PATCH 0123/1113] add --full flag to ansible-pull man page add --full flag that was added in #13502 --- docs/man/man1/ansible-pull.1.asciidoc.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 333b8e34e0f..0afba2aeaac 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -95,6 +95,10 @@ Force running of playbook even if unable to update playbook repository. 
This can be useful, for example, to enforce run-time state when a network connection may not always be up or possible. +*--full*:: + +Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision. + *-h*, *--help*:: Show the help message and exit. From 89603a0509117610e2cbebc6c48475a3b8af98b2 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 13 Dec 2015 12:18:28 -0500 Subject: [PATCH 0124/1113] added that ansible-pull is now shallow to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bf11e6c5bc..c6319634fb7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -350,6 +350,7 @@ newline being stripped you can change your playbook like this: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: ``` From f8ff63f8c8ab001ea8f096968b550f23262c193c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 14 Dec 2015 03:06:52 -0500 Subject: [PATCH 0125/1113] A few tweaks to improve new forking code --- lib/ansible/plugins/strategy/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index ea30b800b02..4047bde73a2 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -149,17 +149,20 @@ class StrategyBase: # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() + queued = False while True: (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] if worker_prc is None or not worker_prc.is_alive(): worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) self._workers[self._cur_worker][0] = worker_prc worker_prc.start() - break + queued = True self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 time.sleep(0.0001) + if queued: + break del task_vars self._pending_results += 1 @@ -196,7 +199,7 @@ class StrategyBase: else: iterator.mark_host_failed(host) (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) - if state.run_state != PlayIterator.ITERATING_RESCUE: + if not state or state.run_state != PlayIterator.ITERATING_RESCUE: self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: From 279c5a359631d296e1a91c1520417e68750138bb Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 14 Dec 2015 03:07:20 -0500 Subject: [PATCH 0126/1113] Cleanup strategy tests broken by new forking strategy --- .../plugins/strategies/test_strategy_base.py | 125 +++++++++++------- 1 file changed, 75 insertions(+), 50 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py 
b/test/units/plugins/strategies/test_strategy_base.py index bf01cf6fcc2..7cc81a0324e 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -24,8 +24,11 @@ from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError from ansible.plugins.strategy import StrategyBase +from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_queue_manager import TaskQueueManager from ansible.executor.task_result import TaskResult +from ansible.playbook.handler import Handler +from ansible.inventory.host import Host from six.moves import queue as Queue from units.mock.loader import DictDataLoader @@ -98,37 +101,44 @@ class TestStrategyBase(unittest.TestCase): mock_tqm._unreachable_hosts = ["host02"] self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:]) - def test_strategy_base_queue_task(self): + @patch.object(WorkerProcess, 'run') + def test_strategy_base_queue_task(self, mock_worker): + def fake_run(self): + return + + mock_worker.run.side_effect = fake_run + fake_loader = DictDataLoader() + mock_var_manager = MagicMock() + mock_host = MagicMock() + mock_inventory = MagicMock() + mock_options = MagicMock() + mock_options.module_path = None + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) - - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.get_loader.return_value = fake_loader - - strategy_base = StrategyBase(tqm=mock_tqm) - strategy_base._cur_worker = 0 - strategy_base._pending_results = 0 - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 1) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 2) - self.assertEqual(strategy_base._pending_results, 2) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 0) - self.assertEqual(strategy_base._pending_results, 3) - workers[0][1].put.side_effect = EOFError - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 3) + try: + strategy_base = StrategyBase(tqm=tqm) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 1) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 2) + self.assertEqual(strategy_base._pending_results, 2) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 0) + 
self.assertEqual(strategy_base._pending_results, 3) + finally: + tqm.cleanup() + def test_strategy_base_process_pending_results(self): mock_tqm = MagicMock() @@ -156,6 +166,7 @@ class TestStrategyBase(unittest.TestCase): mock_iterator = MagicMock() mock_iterator.mark_host_failed.return_value = None + mock_iterator.get_next_task_for_host.return_value = (None, None) mock_host = MagicMock() mock_host.name = 'test01' @@ -315,22 +326,15 @@ class TestStrategyBase(unittest.TestCase): res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) self.assertEqual(res, []) - def test_strategy_base_run_handlers(self): - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) - - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.send_callback.return_value = None - + @patch.object(WorkerProcess, 'run') + def test_strategy_base_run_handlers(self, mock_worker): + def fake_run(*args): + return + mock_worker.side_effect = fake_run mock_play_context = MagicMock() - mock_handler_task = MagicMock() + mock_handler_task = MagicMock(Handler) + mock_handler_task.action = 'foo' mock_handler_task.get_name.return_value = "test handler" mock_handler_task.has_triggered.return_value = False @@ -341,11 +345,9 @@ class TestStrategyBase(unittest.TestCase): mock_play = MagicMock() mock_play.handlers = [mock_handler] - mock_host = MagicMock() + mock_host = MagicMock(Host) mock_host.name = "test01" - mock_iterator = MagicMock() - mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] @@ -355,8 +357,31 @@ class TestStrategyBase(unittest.TestCase): mock_iterator = MagicMock mock_iterator._play = mock_play - strategy_base = StrategyBase(tqm=mock_tqm) - strategy_base._inventory = mock_inventory - strategy_base._notified_handlers = {"test handler": [mock_host]} + fake_loader = DictDataLoader() + mock_options = MagicMock() + mock_options.module_path = None - result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_mgr, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() + + try: + strategy_base = StrategyBase(tqm=tqm) + + strategy_base._inventory = mock_inventory + strategy_base._notified_handlers = {"test handler": [mock_host]} + + mock_return_task = MagicMock(Handler) + mock_return_host = MagicMock(Host) + task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + tqm._final_q.put(('host_task_ok', task_result)) + + result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + finally: + tqm.cleanup() From f5f9b2fd354fe013e68f589279cc349a42a461fb Mon Sep 17 00:00:00 2001 From: Hans-Joachim Kliemeck <git@kliemeck.de> Date: Mon, 14 Dec 2015 14:36:35 +0100 Subject: [PATCH 0127/1113] use default settings from ansible.cfg --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 012872be7c5..48e01346726 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -246,7 +246,7 @@ class CLI(object): help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: - 
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.expand_tilde, type=str) From 1f8e484b70f90d34d127eda9cf10a619bb0e72e8 Mon Sep 17 00:00:00 2001 From: Monty Taylor <mordred@inaugust.com> Date: Thu, 3 Dec 2015 07:07:13 -0800 Subject: [PATCH 0128/1113] Fix the refresh flag in openstack inventory Refresh will update the dogpile cache from shade, but doesn't cause the ansible side json cache to be invalidated. It's a simple oversight. --- contrib/inventory/openstack.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 46b43e92212..231488b06df 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -94,9 +94,9 @@ def get_groups_from_server(server_vars): return groups -def get_host_groups(inventory): +def get_host_groups(inventory, refresh=False): (cache_file, cache_expiration_time) = get_cache_settings() - if is_cache_stale(cache_file, cache_expiration_time): + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): groups = to_json(get_host_groups_from_cloud(inventory)) open(cache_file, 'w').write(groups) else: @@ -121,8 +121,10 @@ def get_host_groups_from_cloud(inventory): return groups -def is_cache_stale(cache_file, cache_expiration_time): +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True if os.path.isfile(cache_file): mod_time = os.path.getmtime(cache_file) current_time = time.time() @@ -176,7 +178,7 @@ def main(): ) if args.list: - output = get_host_groups(inventory) + output = get_host_groups(inventory, refresh=args.refresh) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) From 49dc9eea169efb329d7d184df53ce3dea4dface1 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy <jon@soh.re> Date: Wed, 9 Dec 2015 15:11:21 -0500 Subject: [PATCH 0129/1113] add tests for encrypted hash mysql_user --- .../tasks/user_password_update_test.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef956..9a899b206ca 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -79,8 +79,23 @@ - include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }} +- name: Create user with password1234 using hash. (expect changed=true) + mysql_user: name=jmainguy password='*D65798AAC0E5C6DF3F320F8A30E026E7EBD73A95' encrypted=yes + register: encrypt_result +- name: Check that the module made a change + assert: + that: + - "encrypt_result.changed == True" +- name: See if the password needs to be updated. 
(expect changed=false) + mysql_user: name=jmainguy password='password1234' + register: plain_result +- name: Check that the module did not change the password + assert: + that: + - "plain_result.changed == False" - +- name: Remove user (cleanup) + mysql_user: name=jmainguy state=absent From 9f61144401a16c9d610193522c71e8852addf63e Mon Sep 17 00:00:00 2001 From: Monty Taylor <mordred@inaugust.com> Date: Thu, 3 Dec 2015 07:04:24 -0800 Subject: [PATCH 0130/1113] Optionally only use UUIDs for openstack hosts on duplicates The OpenStack inventory lists hostnames as the UUIDs because hostsnames are not guarnateed to be unique on OpenStack. However, for the common case, this is just confusing. The new behavior is a visible change, so make it an opt-in via config. Only turn the hostnames to UUIDs if there are duplicate hostnames. --- contrib/inventory/openstack.py | 57 +++++++++++++++++++++++++++------ contrib/inventory/openstack.yml | 3 ++ 2 files changed, 50 insertions(+), 10 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 231488b06df..b82a042c29e 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -32,6 +32,13 @@ # all of them and present them as one contiguous inventory. # # See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. +# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server import argparse import collections @@ -51,7 +58,7 @@ import shade.inventory CONFIG_FILES = ['/etc/ansible/openstack.yaml'] -def get_groups_from_server(server_vars): +def get_groups_from_server(server_vars, namegroup=True): groups = [] region = server_vars['region'] @@ -76,7 +83,8 @@ def get_groups_from_server(server_vars): groups.append(extra_group) groups.append('instance-%s' % server_vars['id']) - groups.append(server_vars['name']) + if namegroup: + groups.append(server_vars['name']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: @@ -106,17 +114,36 @@ def get_host_groups(inventory, refresh=False): def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) hostvars = {} - for server in inventory.list_hosts(): + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): if 'interface_ip' not in server: continue - for group in get_groups_from_server(server): - groups[group].append(server['id']) - hostvars[server['id']] = dict( - ansible_ssh_host=server['interface_ip'], - openstack=server, - ) + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + server = servers[0] + hostvars[name] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=False): + groups[group].append(server['name']) + else: + for server in servers: + server_id = server['id'] + hostvars[server_id] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + 
for group in get_groups_from_server(server, namegroup=True): + groups[group].append(server_id) groups['_meta'] = {'hostvars': hostvars} return groups @@ -171,11 +198,21 @@ def main(): try: config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES shade.simple_logging(debug=args.debug) - inventory = shade.inventory.OpenStackInventory( + inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) if args.list: output = get_host_groups(inventory, refresh=args.refresh) diff --git a/contrib/inventory/openstack.yml b/contrib/inventory/openstack.yml index a99bb020580..1520e2937ec 100644 --- a/contrib/inventory/openstack.yml +++ b/contrib/inventory/openstack.yml @@ -26,3 +26,6 @@ clouds: username: stack password: stack project_name: stack +ansible: + use_hostnames: True + expand_hostvars: False From 6312e38133e79674910b2cb8c1b1aa695c6816fc Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 14 Dec 2015 10:35:38 -0500 Subject: [PATCH 0131/1113] Fixing up some non-py3 things for unit tests --- lib/ansible/executor/task_queue_manager.py | 2 +- lib/ansible/module_utils/known_hosts.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 9189ab95819..dae70a12925 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -99,7 +99,7 @@ class TaskQueueManager: def _initialize_processes(self, num): self._workers = [] - for i in xrange(num): + for i in range(num): main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() self._workers.append([None, main_q, rslt_q]) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index d2644d97666..2824836650a 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0700) + os.makedirs(user_ssh_dir, 0o700) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From 80d23d639c2351ab6d0951763ca101516f0f2eb7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 14 Dec 2015 10:43:30 -0500 Subject: [PATCH 0132/1113] Use an octal representation that works from 2.4->3+ for known_hosts --- lib/ansible/module_utils/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 2824836650a..9b6af2a28e9 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0o700) + os.makedirs(user_ssh_dir, int('700', 8)) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From c9eb41109f83358d8d968457728996f60b30b933 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Dec 2015 08:03:56 -0800 Subject: 
[PATCH 0133/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0d23b3df526..e6b7b17326b 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0d23b3df526875c8fc6edf94268f3aa850ec05f1 +Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 51813e00333..f3251de29cb 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 51813e003331c3341b07c5cda33346cada537a3b +Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 From 457f86f61a3bef95b562dbf91b523c563bff2f63 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Dec 2015 08:50:37 -0800 Subject: [PATCH 0134/1113] Minor: Correct type pyhton => python --- test/integration/roles/test_docker/tasks/main.yml | 2 +- test/units/plugins/cache/test_cache.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index 2ea15644d5f..76b3fa70702 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -3,7 +3,7 @@ #- include: docker-setup-rht.yml # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when # they've got that sorted out - # CentOS 6 currently broken by conflicting files in pyhton-backports and python-backports-ssl_match_hostname + # CentOS 6 currently broken by conflicting files in python-backports and python-backports-ssl_match_hostname #when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 # python-docker isn't available until 14.10. 
Revist at the next Ubuntu LTS diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index af1d924910d..0547ba55bf0 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -110,6 +110,6 @@ class TestAbstractClass(unittest.TestCase): def test_memory_cachemodule(self): self.assertIsInstance(MemoryCache(), MemoryCache) - @unittest.skipUnless(HAVE_REDIS, 'Redis pyhton module not installed') + @unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed') def test_redis_cachemodule(self): self.assertIsInstance(RedisCache(), RedisCache) From e595c501976d5f378414dec90543151d7319253b Mon Sep 17 00:00:00 2001 From: gp <gparent@users.noreply.github.com> Date: Mon, 14 Dec 2015 12:06:35 -0500 Subject: [PATCH 0135/1113] Fix typo in galaxy.rst Fix typo --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e456..c9dea273367 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From a7ac98262d94cc24a584b8e163cebc0a2a492cd6 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sat, 12 Dec 2015 20:18:36 +0100 Subject: [PATCH 0136/1113] Make module_utils.known_hosts.get_fqdn work on ipv6 --- lib/ansible/module_utils/known_hosts.py | 16 +++++++++------- .../units/module_utils/basic/test_known_hosts.py | 8 ++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 9b6af2a28e9..64ad0c76c2b 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -74,12 +74,12 @@ def get_fqdn(repo_url): if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = repo_url.split("@", 1)[1] - if ":" in repo_url: - repo_url = repo_url.split(":")[0] - result = repo_url + if repo_url.startswith('['): + result = repo_url.split(']', 1)[0] + ']' + elif ":" in repo_url: + result = repo_url.split(":")[0] elif "/" in repo_url: - repo_url = repo_url.split("/")[0] - result = repo_url + result = repo_url.split("/")[0] elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse.urlparse(repo_url) @@ -87,11 +87,13 @@ def get_fqdn(repo_url): # ensure we actually have a parts[1] before continuing. 
if parts[1] != '': result = parts[1] - if ":" in result: - result = result.split(":")[0] if "@" in result: result = result.split("@", 1)[1] + if result[0].startswith('['): + result = result.split(']', 1)[0] + ']' + elif ":" in result: + result = result.split(":")[0] return result def check_hostkey(module, fqdn): diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py index 952184bfec9..515d67686de 100644 --- a/test/units/module_utils/basic/test_known_hosts.py +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -33,6 +33,14 @@ class TestAnsibleModuleKnownHosts(unittest.TestCase): {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, 'ssh://six.example.org:21/example.org': {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + 'ssh://[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'ssh://[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, } def test_is_ssh_url(self): From 8d16638fec3e88e0f7b0dde24aae095100436644 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Dec 2015 10:54:10 -0800 Subject: [PATCH 0137/1113] Fix for template module not creating a file that was not present when force=false --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0b..d134f80a8df 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -150,7 +150,7 @@ class ActionModule(ActionBase): diff = {} new_module_args = self._task.args.copy() - if force and local_checksum != remote_checksum: + if (remote_checksum == '1') or (force and local_checksum != remote_checksum): result['changed'] = True # if showing diffs, we need to get the remote value From 73160e65e5708a506db2682348bf69d9ea97d3b9 Mon Sep 17 00:00:00 2001 From: Krzysztof Jurewicz <krzysztof.jurewicz@gmail.com> Date: Tue, 15 Dec 2015 12:03:50 +0100 Subject: [PATCH 0138/1113] Use wrapped connect_to_region everywhere in ec2.py --- contrib/inventory/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 4c5cf23fcb8..ff13aa9d05d 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -511,7 +511,7 @@ class Ec2Inventory(object): # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: - conn = elasticache.connect_to_region(region) + conn = self.connect_to_aws(elasticache, region) if conn: # show_cache_node_info = True # because we also want nodes' information @@ -547,7 +547,7 @@ class Ec2Inventory(object): # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) 
try: - conn = elasticache.connect_to_region(region) + conn = self.connect_to_aws(elasticache, region) if conn: response = conn.describe_replication_groups() From 27cd7668c152c5b2b74a10ffe78bfca7a11aeaac Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 8 Dec 2015 07:34:09 -0500 Subject: [PATCH 0139/1113] the ssh shared module will try to use keys if the password is not supplied The current ssh shared module forces only password based authentication. This change will allow the ssh module to use keys if a password is not provided. --- lib/ansible/module_utils/ssh.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/ssh.py index 343f017a988..00922ef8cdd 100644 --- a/lib/ansible/module_utils/ssh.py +++ b/lib/ansible/module_utils/ssh.py @@ -91,12 +91,17 @@ class Ssh(object): def __init__(self): self.client = None - def open(self, host, port=22, username=None, password=None, timeout=10): + def open(self, host, port=22, username=None, password=None, + timeout=10, key_filename=None): + ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + use_keys = password is None + ssh.connect(host, port=port, username=username, password=password, - timeout=timeout, allow_agent=False, look_for_keys=False) + timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys, + key_filename=key_filename) self.client = ssh return self.on_open() From be4d1f9ee380705768574baefb75830e3c76afa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Tue, 15 Dec 2015 12:49:20 +0100 Subject: [PATCH 0140/1113] Fix a part of python 3 tests (make tests-py3, see https://github.com/ansible/ansible/issues/13553 for more details). 
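The hunk below replaces the bare ``import urlparse`` with the usual two-branch compatibility import, since that module moved to ``urllib.parse`` on Python 3. A minimal, standalone sketch of the same pattern (the URL is only an illustrative value)::

    try:
        import urlparse                   # Python 2 name
    except ImportError:
        import urllib.parse as urlparse   # Python 3 location, aliased to the old name

    # Callers keep using the Python 2 spelling unchanged:
    parts = urlparse.urlparse("ssh://user@example.org:2222/repo.git")
    print(parts.scheme, parts.hostname, parts.port)
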
--- lib/ansible/module_utils/known_hosts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 64ad0c76c2b..52b0bb74b0f 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -28,7 +28,11 @@ import os import hmac -import urlparse + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse try: from hashlib import sha1 From a0842781a6a77a0e51ad411ab186395379cc4dcb Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 15 Dec 2015 08:44:43 -0500 Subject: [PATCH 0141/1113] renamed ssh.py shared module file to clarify --- lib/ansible/module_utils/{ssh.py => issh.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/ansible/module_utils/{ssh.py => issh.py} (100%) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/issh.py similarity index 100% rename from lib/ansible/module_utils/ssh.py rename to lib/ansible/module_utils/issh.py From be5488cb60869c67b0ea521a4044062157817e50 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 15 Dec 2015 09:27:53 -0500 Subject: [PATCH 0142/1113] clean debug output to match prev versions --- lib/ansible/plugins/callback/__init__.py | 6 ++++++ lib/ansible/plugins/callback/default.py | 1 + lib/ansible/plugins/callback/minimal.py | 1 + 3 files changed, 8 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index b8a48943f28..7371fe0a51e 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -140,6 +140,12 @@ class CallbackBase: else: self.v2_playbook_item_on_ok(newres) + def _clean_results(self, result, task_name): + if 'changed' in result and task_name in ['debug']: + del result['changed'] + if 'invocation' in result and task_name in ['debug']: + del result['invocation'] + def set_play_context(self, play_context): pass diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 1f37f4b975e..e515945bba5 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -62,6 +62,7 @@ class CallbackModule(CallbackBase): def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': return diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index f855c1a6e53..71f9f5dfeef 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -64,6 +64,7 @@ class CallbackModule(CallbackBase): self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') else: From fcc9258b743d2f596628f28dd4cdc01f0f8d306e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 01:48:22 -0500 Subject: [PATCH 0143/1113] Use the original host rather than the serialized one when processing results Fixes #13526 Fixes #13564 Fixes #13566 --- lib/ansible/plugins/strategy/__init__.py | 25 +++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 4047bde73a2..d2d79d036bd 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -185,10 +185,20 @@ class StrategyBase: result = self._final_q.get() display.debug("got result from result worker: %s" % ([text_type(x) for x in result],)) + # helper method, used to find the original host from the one + # returned in the result/message, which has been serialized and + # thus had some information stripped from it to speed up the + # serialization process + def get_original_host(host): + if host.name in self._inventory._hosts_cache: + return self._inventory._hosts_cache[host.name] + else: + return self._inventory.get_host(host.name) + # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] - host = task_result._host + host = get_original_host(task_result._host) task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: @@ -244,7 +254,7 @@ class StrategyBase: self._add_host(new_host_info, iterator) elif result[0] == 'add_group': - host = result[1] + host = get_original_host(result[1]) result_item = result[2] self._add_group(host, result_item) @@ -252,19 +262,20 @@ class StrategyBase: task_result = result[1] handler_name = result[2] - original_task = iterator.get_original_task(task_result._host, task_result._task) + original_host = get_original_host(task_result._host) + original_task = iterator.get_original_task(original_host, task_result._task) if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] - if task_result._host not in self._notified_handlers[handler_name]: - self._notified_handlers[handler_name].append(task_result._host) + if original_host not in self._notified_handlers[handler_name]: + self._notified_handlers[handler_name].append(original_host) display.vv("NOTIFIED HANDLER %s" % (handler_name,)) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow the delegate_to value for registered vars and # the variable goes in the fact_cache - host = result[1] + host = get_original_host(result[1]) task = result[2] var_value = wrap_var(result[3]) var_name = task.register @@ -278,7 +289,7 @@ class StrategyBase: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): - host = result[1] + host = get_original_host(result[1]) task = result[2] item = result[3] From 9942d71d345cf221dbcdb19f362d80430d995905 Mon Sep 17 00:00:00 2001 From: Matt Clay 
<matt@mystile.com> Date: Wed, 16 Dec 2015 01:37:02 -0800 Subject: [PATCH 0144/1113] Test for filename option in apt_repository module. --- .../roles/test_apt_repository/tasks/apt.yml | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/test/integration/roles/test_apt_repository/tasks/apt.yml b/test/integration/roles/test_apt_repository/tasks/apt.yml index 49d13bc52a3..9c8e3ab4473 100644 --- a/test/integration/roles/test_apt_repository/tasks/apt.yml +++ b/test/integration/roles/test_apt_repository/tasks/apt.yml @@ -2,6 +2,7 @@ - set_fact: test_ppa_name: 'ppa:menulibre-dev/devel' + test_ppa_filename: 'menulibre-dev' test_ppa_spec: 'deb http://ppa.launchpad.net/menulibre-dev/devel/ubuntu {{ansible_distribution_release}} main' test_ppa_key: 'A7AD98A1' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index @@ -144,6 +145,47 @@ - name: 'ensure ppa key is absent (expect: pass)' apt_key: id='{{test_ppa_key}}' state=absent +# +# TEST: apt_repository: repo=<spec> filename=<filename> +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name=<spec> filename=<filename> (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' + - 'result.repo == "{{test_ppa_spec}}"' + +- name: 'examine source file' + stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list' + register: source_file + +- name: 'assert source file exists' + assert: + that: + - 'source_file.stat.exists == True' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + # # TEARDOWN # From 63b624707d0bcb057cec7c81d86b511106cba512 Mon Sep 17 00:00:00 2001 From: David <vagercai@gmail.com> Date: Wed, 16 Dec 2015 23:46:06 +0800 Subject: [PATCH 0145/1113] Fix typo --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 516403ac805..c6c01db5d48 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -132,7 +132,7 @@ Note that you cannot do variable substitution when including one playbook inside another. .. note:: - You can not conditionally path the location to an include file, + You can not conditionally pass the location to an include file, like you can with 'vars_files'. If you find yourself needing to do this, consider how you can restructure your playbook to be more class/role oriented. 
This is to say you cannot use a 'fact' to From 73ead4fbbadb8ad874f95f0dd542256b2ad730aa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Dec 2015 20:05:55 -0800 Subject: [PATCH 0146/1113] First attempt to fix https certificate errors through a proxy with python-2.7.9+ Fixes #12549 --- lib/ansible/module_utils/urls.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 979d5943dde..0f45c360349 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -326,11 +326,15 @@ class CustomHTTPSConnection(httplib.HTTPSConnection): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) + + server_hostname = self.host if self._tunnel_host: self.sock = sock self._tunnel() + server_hostname = self._tunnel_host + if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=self.host) + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) @@ -542,7 +546,7 @@ class SSLValidationHandler(urllib2.BaseHandler): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) if context: - ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) + ssl_s = context.wrap_socket(s, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) From 72a0654b81aec47e9fa989ba8c1d50a55a093f6f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 15 Dec 2015 15:35:13 -0800 Subject: [PATCH 0147/1113] Fixes for proxy on RHEL5 --- lib/ansible/module_utils/urls.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 0f45c360349..d0ee260e17f 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -328,6 +328,8 @@ class CustomHTTPSConnection(httplib.HTTPSConnection): sock = socket.create_connection((self.host, self.port), self.timeout) server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) if self._tunnel_host: self.sock = sock self._tunnel() @@ -377,7 +379,10 @@ def generic_urlparse(parts): # get the username, password, etc. 
try: netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') - (auth, hostname, port) = netloc_re.match(parts[1]) + match = netloc_re.match(parts[1]) + auth = match.group(1) + hostname = match.group(2) + port = match.group(3) if port: # the capture group for the port will include the ':', # so remove it and convert the port to an integer @@ -387,6 +392,8 @@ def generic_urlparse(parts): # and then split it up based on the first ':' found auth = auth[:-1] username, password = auth.split(':', 1) + else: + username = password = None generic_parts['username'] = username generic_parts['password'] = password generic_parts['hostname'] = hostname @@ -394,7 +401,7 @@ def generic_urlparse(parts): except: generic_parts['username'] = None generic_parts['password'] = None - generic_parts['hostname'] = None + generic_parts['hostname'] = parts[1] generic_parts['port'] = None return generic_parts @@ -536,7 +543,8 @@ class SSLValidationHandler(urllib2.BaseHandler): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy)) - s.connect((proxy_parts.get('hostname'), proxy_parts.get('port'))) + port = proxy_parts.get('port') or 443 + s.connect((proxy_parts.get('hostname'), port)) if proxy_parts.get('scheme') == 'http': s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port)) if proxy_parts.get('username'): From 33863eb653f3ed4d6f30ab816743443f473c5eae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 07:38:51 -0800 Subject: [PATCH 0148/1113] Conditionally create the CustomHTTPSConnection class only if we have the required baseclasses. Fixes #11918 --- lib/ansible/module_utils/urls.py | 66 +++++++++++++++++--------------- 1 file changed, 35 insertions(+), 31 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index d0ee260e17f..41613f6cb61 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -310,42 +310,45 @@ class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass +# Some environments (Google Compute Engine's CoreOS deploys) do not compile +# against openssl and thus do not have any HTTPS support. +CustomHTTPSConnection = CustomHTTPSHandler = None +if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib2, 'HTTPSHandler'): + class CustomHTTPSConnection(httplib.HTTPSConnection): + def __init__(self, *args, **kwargs): + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + if HAS_SSLCONTEXT: + self.context = create_default_context() + if self.cert_file: + self.context.load_cert_chain(self.cert_file, self.key_file) -class CustomHTTPSConnection(httplib.HTTPSConnection): - def __init__(self, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) - if HAS_SSLCONTEXT: - self.context = create_default_context() - if self.cert_file: - self.context.load_cert_chain(self.cert_file, self.key_file) + def connect(self): + "Connect to a host on a given (SSL) port." - def connect(self): - "Connect to a host on a given (SSL) port." 
+ if hasattr(self, 'source_address'): + sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) + else: + sock = socket.create_connection((self.host, self.port), self.timeout) - if hasattr(self, 'source_address'): - sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) - else: - sock = socket.create_connection((self.host, self.port), self.timeout) + server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) + if self._tunnel_host: + self.sock = sock + self._tunnel() + server_hostname = self._tunnel_host - server_hostname = self.host - # Note: self._tunnel_host is not available on py < 2.6 but this code - # isn't used on py < 2.6 (lack of create_connection) - if self._tunnel_host: - self.sock = sock - self._tunnel() - server_hostname = self._tunnel_host + if HAS_SSLCONTEXT: + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) + else: + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) - if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) - else: - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) + class CustomHTTPSHandler(urllib2.HTTPSHandler): -class CustomHTTPSHandler(urllib2.HTTPSHandler): + def https_open(self, req): + return self.do_open(CustomHTTPSConnection, req) - def https_open(self, req): - return self.do_open(CustomHTTPSConnection, req) - - https_request = urllib2.AbstractHTTPHandler.do_request_ + https_request = urllib2.AbstractHTTPHandler.do_request_ def generic_urlparse(parts): ''' @@ -673,8 +676,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, handlers.append(proxyhandler) # pre-2.6 versions of python cannot use the custom https - # handler, since the socket class is lacking this method - if hasattr(socket, 'create_connection'): + # handler, since the socket class is lacking create_connection. + # Some python builds lack HTTPS support. 
+ if hasattr(socket, 'create_connection') and CustomHTTPSHandler: handlers.append(CustomHTTPSHandler) opener = urllib2.build_opener(*handlers) From 0095d04af9712c0c026b29e45dbe57a70e30f1e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 08:02:46 -0800 Subject: [PATCH 0149/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e6b7b17326b..50e7bff5546 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a +Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f3251de29cb..bde5686552f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 +Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 From 6a252a3f7727649c61c007e73f04201fd6fbdfa8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 11:21:19 -0500 Subject: [PATCH 0150/1113] Preserve the cumulative path for checking includes which have parents Otherwise, each relative include path is checked on its own, rather than in relation to the (possibly relative) path of its parent, meaning includes multiple level deep may fail to find the correct (or any) file. Fixes #13472 --- lib/ansible/playbook/included_file.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index b7c0fb81756..7fb851a12af 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -81,14 +81,19 @@ class IncludedFile: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._task_include + cumulative_path = None while parent_include is not None: parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params'))) + if cumulative_path is None: + cumulative_path = parent_include_dir + elif not os.path.isabs(cumulative_path): + cumulative_path = os.path.join(parent_include_dir, cumulative_path) include_target = templar.template(include_result['include']) if original_task._role: - new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir) + new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target) else: - include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target) + include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break From 375eb501b3b1edf7fd91807374edfcd60ca736b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 09:40:01 -0800 Subject: [PATCH 0151/1113] Update url to site that has an invalid certificate --- test/integration/roles/test_get_url/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 6e3842f6abf..09ee34277a0 100644 --- 
a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -28,7 +28,7 @@ - name: test https fetch to a site with mismatched hostname and certificate get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -46,7 +46,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 34e88e48a567d52e3ed0c3ecb6a5aa578e53dd19 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy <jon@soh.re> Date: Mon, 16 Nov 2015 22:08:15 -0500 Subject: [PATCH 0152/1113] Add shared connection code for mysql modules --- lib/ansible/module_utils/mysql.py | 66 +++++++++++++++ .../utils/module_docs_fragments/mysql.py | 84 +++++++++++++++++++ .../tasks/user_password_update_test.yml | 1 - .../tasks/assert_fail_msg.yml | 2 - 4 files changed, 150 insertions(+), 3 deletions(-) create mode 100644 lib/ansible/module_utils/mysql.py create mode 100644 lib/ansible/utils/module_docs_fragments/mysql.py diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py new file mode 100644 index 00000000000..48e00adfd9c --- /dev/null +++ b/lib/ansible/module_utils/mysql.py @@ -0,0 +1,66 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None): + config = { + 'host': module.params['login_host'], + 'ssl': { + } + } + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['port'] = module.params['login_port'] + + if os.path.exists(config_file): + config['read_default_file'] = config_file + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + + db_connection = MySQLdb.connect(**config) + if cursor_class is not None: + return db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) + else: + return db_connection.cursor() diff --git a/lib/ansible/utils/module_docs_fragments/mysql.py b/lib/ansible/utils/module_docs_fragments/mysql.py new file mode 100644 index 00000000000..5dd1e04f93b --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/mysql.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 Jonathan Mainguy <jon@soh.re> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard mysql documentation fragment + DOCUMENTATION = ''' +options: + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + default: localhost + login_port: + description: + - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used + required: false + default: 3306 + login_unix_socket: + description: + - The path to a Unix domain socket for local connections + required: false + default: null + config_file: + description: + - Specify a config file from which user and password are to be read + required: false + default: '~/.my.cnf' + version_added: "2.0" + ssl_ca: + required: false + default: null + version_added: "2.0" + description: + - The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server. + ssl_cert: + required: false + default: null + version_added: "2.0" + description: + - The path to a client public key certificate. + ssl_key: + required: false + default: null + version_added: "2.0" + description: + - The path to the client private key. +requirements: + - MySQLdb +notes: + - Requires the MySQLdb Python package on the remote host. 
For Ubuntu, this + is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this + is as easy as yum install MySQL-python. (See M(yum).) + - Both C(login_password) and C(login_user) are required when you are + passing credentials. If none are present, the module will attempt to read + the credentials from C(~/.my.cnf), and finally fall back to using the MySQL + default login of 'root' with no password. +''' diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef956..904165c33ec 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -63,7 +63,6 @@ assert: that: - "result.failed == true" - - "'check login credentials (login_user, and login_password' in result.msg" - name: create database using user2 and new password mysql_db: name={{ db_name }} state=present login_user={{ user_name_2 }} login_password={{ user_password_1 }} diff --git a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml index 70aa26856ed..ba51b9d67cb 100644 --- a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml +++ b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml @@ -23,5 +23,3 @@ assert: that: - "output.failed == true" - - "'{{msg}}' in output.msg" - From 851c0058b148ce041af5ca5c9fbdf25ff854cf8f Mon Sep 17 00:00:00 2001 From: Chrrrles Paul <chrrrles@users.noreply.github.com> Date: Wed, 16 Dec 2015 12:45:05 -0600 Subject: [PATCH 0153/1113] Removing yaml support for path: --- docsite/rst/galaxy.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea273367..f4ca16cb8f1 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -73,10 +73,6 @@ And here's an example showing some specific version downloads from multiple sour # from GitHub - src: https://github.com/bennojoy/nginx - # from GitHub installing to a relative path - - src: https://github.com/bennojoy/nginx - path: vagrant/roles/ - # from GitHub, overriding the name and specifying a specific tag - src: https://github.com/bennojoy/nginx version: master @@ -98,7 +94,6 @@ And here's an example showing some specific version downloads from multiple sour - src: git@gitlab.company.com:mygroup/ansible-base.git scm: git version: 0.1.0 - path: roles/ As you can see in the above, there are a large amount of controls available to customize where roles can be pulled from, and what to save roles as. 
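As a usage sketch for the shared mysql_connect() helper introduced in PATCH 0152 above (everything here is illustrative and not part of the patch series: the argument_spec is a minimal hypothetical module, the import lines are written as plain imports for readability rather than via Ansible's module_utils snippet mechanism, and the helper itself expects MySQLdb and os to be available in the embedding module's namespace):

    import os
    import MySQLdb
    import MySQLdb.cursors

    from ansible.module_utils.basic import AnsibleModule
    from ansible.module_utils.mysql import mysql_connect

    module = AnsibleModule(argument_spec=dict(
        # mysql_connect() reads these three keys directly from module.params
        login_host=dict(default='localhost'),
        login_port=dict(default=3306, type='int'),
        login_unix_socket=dict(default=None),
        login_user=dict(default=None),
        login_password=dict(default=None, no_log=True),
        config_file=dict(default='~/.my.cnf'),
    ))

    cursor = mysql_connect(
        module,
        login_user=module.params['login_user'],
        login_password=module.params['login_password'],
        config_file=os.path.expanduser(module.params['config_file']),
        cursor_class='DictCursor',  # any non-None value selects MySQLdb.cursors.DictCursor
    )
    cursor.execute("SELECT VERSION()")
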
From 6109f703970d741df6e2e28e750667f5d0083fda Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 13:56:55 -0500 Subject: [PATCH 0154/1113] Attempt at fixing strategy unit test failures on py2.6 and py3 --- test/units/plugins/strategies/test_strategy_base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 7cc81a0324e..53e243f926b 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -377,9 +377,7 @@ class TestStrategyBase(unittest.TestCase): strategy_base._inventory = mock_inventory strategy_base._notified_handlers = {"test handler": [mock_host]} - mock_return_task = MagicMock(Handler) - mock_return_host = MagicMock(Host) - task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + task_result = TaskResult(Host('host01'), Handler(), dict(changed=False)) tqm._final_q.put(('host_task_ok', task_result)) result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) From 9724117bbb6c09a4d6d2e1f6573e69db697bdcc7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 11:15:39 -0800 Subject: [PATCH 0155/1113] Update submodule refs for mysql refactor --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 50e7bff5546..3c48320b295 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 +Subproject commit 3c48320b295c3b4f99caccdc5f173b224109a393 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index bde5686552f..8ec4f95ffd6 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 +Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf From baece499dfb6a8d8556db2b686d4f3c86d1d25b1 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Wed, 16 Dec 2015 11:47:12 -0800 Subject: [PATCH 0156/1113] fix plugin loading for Windows modules force plugin loader to only consider .py files, since that's the only place docs can live ATM... 
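A hypothetical illustration of the lookup change described above (the module name and the 2.0-era import path are assumptions, not part of the patch): for a module implemented as win_X.ps1 the DOCUMENTATION string lives in the sibling .py file, so ansible-doc now asks the plugin loader for the python file explicitly.

    from ansible.plugins import module_loader

    # Without mod_type the loader may hand back the .ps1 implementation, which
    # carries no embedded docs; requesting '.py' returns the doc stub instead.
    doc_file = module_loader.find_plugin('win_ping', mod_type='.py')
    print(doc_file)  # path to win_ping.py if found, otherwise None
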
--- lib/ansible/cli/doc.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index a17164eb50e..265b1c9a3fc 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -90,7 +90,8 @@ class DocCLI(CLI): for module in self.args: try: - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue @@ -167,7 +168,8 @@ class DocCLI(CLI): if module in module_docs.BLACKLIST_MODULES: continue - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue From 491fd754f1cbe1944b0f45690842fd49b5977775 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 16:35:56 -0500 Subject: [PATCH 0157/1113] Updating the porting guide to note the complex args/bare vars change Related to #13518 --- docsite/rst/porting_guide_2.0.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 8d69ecd4403..543be052bdc 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -55,6 +55,24 @@ uses key=value escaping which has not changed. The other option is to check for # Output "msg": "Testing some things" +* When specifying complex args as a variable, the variable must use the full jinja2 + variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. + In fact, even specifying args with variables has been deprecated, and will not be + allowed in future versions:: + + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" # <- args here uses the full variable syntax + with_items: my_dirs + * porting task includes * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 From 8716bf8021800a18cb8d6cfea3f296ba4f834692 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 16:32:06 -0500 Subject: [PATCH 0158/1113] All variables in complex args again Also updates the CHANGELOG to note the slight change, where bare variables in args are no longer allowed to be bare variables Fixes #13518 --- CHANGELOG.md | 20 ++++++++++++++++++++ lib/ansible/parsing/mod_args.py | 11 ++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6319634fb7..005171ec9a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,26 @@ newline being stripped you can change your playbook like this: "msg": "Testing some things" ``` +* When specifying complex args as a variable, the variable must use the full jinja2 +variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. 
+In fact, even specifying args with variables has been deprecated, and will not be +allowed in future versions: + + ``` + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" + with_items: my_dirs + ``` + ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index abc35a415e3..86b2d0d996d 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -137,7 +137,16 @@ class ModuleArgsParser: # than those which may be parsed/normalized next final_args = dict() if additional_args: - final_args.update(additional_args) + if isinstance(additional_args, string_types): + templar = Templar(loader=None) + if templar._contains_vars(additional_args): + final_args['_variable_params'] = additional_args + else: + raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')") + elif isinstance(additional_args, dict): + final_args.update(additional_args) + else: + raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").') # how we normalize depends if we figured out what the module name is # yet. If we have already figured it out, it's an 'old style' invocation. From fffd29d1ab15dc93a2854f874695b63e15d5c198 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 14:06:11 -0800 Subject: [PATCH 0159/1113] Update mysql setup to handle installing mysql with dnf too. --- test/integration/roles/setup_mysql_db/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml index a8010e71389..612d94f6d11 100644 --- a/test/integration/roles/setup_mysql_db/tasks/main.yml +++ b/test/integration/roles/setup_mysql_db/tasks/main.yml @@ -31,6 +31,11 @@ with_items: mysql_packages when: ansible_pkg_mgr == 'yum' +- name: install mysqldb_test rpm dependencies + dnf: name={{ item }} state=latest + with_items: mysql_packages + when: ansible_pkg_mgr == 'dnf' + - name: install mysqldb_test debian dependencies apt: name={{ item }} state=latest with_items: mysql_packages From fd4ad2c8f24be48e2fa103a6b8feae287c4b57fe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Dec 2015 14:08:08 -0800 Subject: [PATCH 0160/1113] Update submodule ref to fix a bug in mysql_user with mariadb --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 3c48320b295..16a3bdaa7da 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 3c48320b295c3b4f99caccdc5f173b224109a393 +Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d From 857456ea5f159bbd333528aa6111b1510e1be78b Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 18:21:47 -0500 Subject: [PATCH 0161/1113] Fixing template integration test for python 2.6 versions No longer immediately fallback to to_json if simplejson is not installed --- lib/ansible/plugins/filter/core.py | 4 +++- test/integration/roles/test_template/tasks/main.yml | 7 ------- 2 files changed, 3 insertions(+), 8 deletions(-) 
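Stepping back to the mod_args.py change in PATCH 0158 above, a rough illustration of the new check (the candidate strings are hypothetical; Templar and _contains_vars are used exactly as in that patch):

    from ansible.template import Templar

    templar = Templar(loader=None)
    for candidate in ("{{ my_dirs }}", "my_dirs"):
        if templar._contains_vars(candidate):
            print("%r is kept as _variable_params and templated later" % candidate)
        else:
            print("%r is rejected: bare variables can no longer supply complex args" % candidate)
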
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index 3ab9db5a51b..dc9acb4d092 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -100,9 +100,11 @@ def to_nice_json(a, *args, **kw): else: if major >= 2: return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw) + try: + return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) + except: # Fallback to the to_json filter return to_json(a, *args, **kw) - return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) def bool(a): ''' return a bool for the arg ''' diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 28477d44e5b..9fd1d860e00 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -49,13 +49,6 @@ - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt -# Seems that python-2.6 now outputs the same format as everywhere else? -# when: pyver.stdout != '2.6' - -#- name: copy known good into place -# copy: src=foo-py26.txt dest={{output_dir}}/foo.txt -# when: pyver.stdout == '2.6' - - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt register: diff_result From 15135f3c16a87f68bede61415f2571097eaa6268 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 19:12:05 -0500 Subject: [PATCH 0162/1113] Make sure we're using the original host when processing include results Also fixes a bug where we were passing an incorrect number of parameters to _do_handler_run() when processing an include file in a handler task/block. Fixes #13560 --- lib/ansible/playbook/included_file.py | 15 +++++++++++---- lib/ansible/plugins/strategy/__init__.py | 2 ++ lib/ansible/plugins/strategy/free.py | 10 ++++++++-- lib/ansible/plugins/strategy/linear.py | 10 ++++++++-- 4 files changed, 29 insertions(+), 8 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index 7fb851a12af..cc756a75a96 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -49,9 +49,15 @@ class IncludedFile: return "%s (%s): %s" % (self._filename, self._args, self._hosts) @staticmethod - def process_include_results(results, tqm, iterator, loader, variable_manager): + def process_include_results(results, tqm, iterator, inventory, loader, variable_manager): included_files = [] + def get_original_host(host): + if host.name in inventory._hosts_cache: + return inventory._hosts_cache[host.name] + else: + return inventory.get_host(host.name) + for res in results: if res._task.action == 'include': @@ -67,9 +73,10 @@ class IncludedFile: if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: continue - original_task = iterator.get_original_task(res._host, res._task) + original_host = get_original_host(res._host) + original_task = iterator.get_original_task(original_host, res._task) - task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task) + task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task) templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get('include_variables', dict()) @@ -116,6 +123,6 @@ class IncludedFile: except ValueError: 
included_files.append(inc_file) - inc_file.add_host(res._host) + inc_file.add_host(original_host) return included_files diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index d2d79d036bd..7b2a3794efc 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -576,6 +576,7 @@ class StrategyBase: host_results, self._tqm, iterator=iterator, + inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager ) @@ -594,6 +595,7 @@ class StrategyBase: for task in block.block: result = self._do_handler_run( handler=task, + handler_name=None, iterator=iterator, play_context=play_context, notified_hosts=included_file._hosts[:], diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 11eeaa92494..f4fc1226a1f 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -139,8 +139,14 @@ class StrategyModule(StrategyBase): host_results.extend(results) try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, - loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8c94267cf46..7bb227dbaea 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -261,8 +261,14 @@ class StrategyModule(StrategyBase): break try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, - iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False From e5c2c03dea0998872a6b16a18d6c187685a5fc7a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 15 Dec 2015 09:39:13 -0500 Subject: [PATCH 0163/1113] Enable host_key checking at the strategy level Implements a new method in the ssh connection plugin (fetch_and_store_key) which is used to prefetch the key using ssh-keyscan. 
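A minimal standalone sketch of the prefetch idea described above, assuming the ssh-keyscan binary is on PATH; the real fetch_ssh_host_key() in the diff below additionally honours the configured port and the HashKnownHosts setting:

    import subprocess

    def scan_host_key(remote_addr, port=None):
        # Ask ssh-keyscan for the host's public keys, roughly as the patch does.
        cmd = ['ssh-keyscan']
        if port:
            cmd.extend(['-p', str(port)])
        cmd.append(remote_addr)
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        if not stdout:
            raise RuntimeError("failed to fetch a host key for %s: %s" % (remote_addr, stderr))
        return stdout

    # Example (hypothetical host):
    # print(scan_host_key('host01.example.com'))
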
--- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 +++++++++++++++++++-- lib/ansible/plugins/strategy/__init__.py | 30 +++- lib/ansible/utils/connection.py | 50 ++++++ 6 files changed, 273 insertions(+), 33 deletions(-) create mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad25..2623bc775b2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,6 +32,7 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVar from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -564,21 +565,7 @@ class TaskExecutor: conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = 'ssh' - if sys.platform.startswith('darwin') and self._play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" + conn_type = get_smart_connection_type(self._play_context) connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not connection: diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 6263dcbc80d..70f9f57b5f1 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,6 +57,7 @@ class Host: name=self.name, vars=self.vars.copy(), address=self.address, + has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -65,10 +66,11 @@ class Host: def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self.has_hostkey = data.get('has_hostkey', False) + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -89,6 +91,7 @@ class Host: self._gathered_facts = False self._uuid = uuid.uuid4() + self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4ca..7fc19c8c195 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ __metaclass__ = type import fcntl import gettext import os + from abc import ABCMeta, abstractmethod, abstractproperty - from functools import wraps -from ansible.compat.six import with_metaclass +from ansible.compat.six import with_metaclass from ansible 
import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,3 +233,4 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) + diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index a2abcf20aee..cce29824e1a 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,7 +19,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.compat.six import text_type + +import base64 import fcntl +import hmac +import operator import os import pipes import pty @@ -28,9 +33,13 @@ import shlex import subprocess import time +from hashlib import md5, sha1, sha256 + from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.boolean import boolean +from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -41,7 +50,128 @@ except ImportError: display = Display() SSHPASS_AVAILABLE = None +HASHED_KEY_MAGIC = "|1|" +def split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + +def get_ssh_opts(play_context): + # FIXME: caching may help here + opts_dict = dict() + try: + cmd = ['ssh', '-G', play_context.remote_addr] + res = subprocess.check_output(cmd) + for line in res.split('\n'): + if ' ' in line: + (key, val) = line.split(' ', 1) + else: + key = line + val = '' + opts_dict[key.lower()] = val + + # next, we manually override any options that are being + # set via ssh_args or due to the fact that `ssh -G` doesn't + # actually use the options set via -o + for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: + attr = getattr(play_context, opt, None) + if attr is not None: + args = split_args(attr) + for arg in args: + if '=' in arg: + (key, val) = arg.split('=', 1) + opts_dict[key.lower()] = val + + return opts_dict + except subprocess.CalledProcessError: + return dict() + +def host_in_known_hosts(host, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + continue + try: + host_fh = open(hf) + except (OSError, IOError) as e: + continue + else: + data = host_fh.read() + host_fh.close() + + for line in data.split("\n"): + line = line.strip() + if line is None or " " not in line: + continue + tokens = line.split() + if not tokens: + continue + if tokens[0].find(HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) + hash = 
hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if hash.digest() == kn_host.decode('base64'): + return True + except: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return True + + return False + +def fetch_ssh_host_key(play_context, ssh_opts): + keyscan_cmd = ['ssh-keyscan'] + + if play_context.port: + keyscan_cmd.extend(['-p', text_type(play_context.port)]) + + if boolean(ssh_opts.get('hashknownhosts', 'no')): + keyscan_cmd.append('-H') + + keyscan_cmd.append(play_context.remote_addr) + + p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + (stdout, stderr) = p.communicate() + if stdout == '': + raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." % stderr) + else: + return stdout + +def add_host_key(host_key, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + user_ssh_dir = os.path.dirname(user_known_hosts) + + if not os.path.exists(user_ssh_dir): + raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) + elif not os.path.isdir(user_ssh_dir): + raise AnsibleError("%s is not a directory" % user_ssh_dir) + + try: + display.vv("adding to known_hosts file: %s" % user_known_hosts) + with open(user_known_hosts, 'a') as f: + f.write(host_key) + except (OSError, IOError) as e: + raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -62,6 +192,56 @@ class Connection(ConnectionBase): def _connect(self): return self + @staticmethod + def fetch_and_store_key(host, play_context): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + display.debug("host %s does not have a known host key, fetching it" % host) + + # build the list of valid host key types, for use later as we scan for keys. + # we also use this to determine the most preferred key when multiple keys are available + valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] + + # attempt to fetch the key with ssh-keyscan. More than one key may be + # returned, so we save all and use the above list to determine which + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_keys = dict() + for host_key in host_key_data: + (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) + key_type = key_type.lower() + if key_type in valid_host_key_types and key_type not in host_keys: + host_keys[key_type.lower()] = host_key + + if len(host_keys) == 0: + raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") + + # now we determine the preferred key by sorting the above dict on the + # index of the key type in the valid keys list + preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] + + # shamelessly copied from here: + # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 + # (which shamelessly copied it from somewhere else...) 
+ (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) + decoded_key = key_hash.decode('base64') + fp_plain = md5(decoded_key).hexdigest() + key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) + + # prompt the user to add the key + # if yes, add it, otherwise raise AnsibleConnectionFailure + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) + display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) + response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") + display.display("") + if boolean(response): + add_host_key(host_key, ssh_opts) + return True + else: + raise AnsibleConnectionFailure("Host key validation failed.") + + return False + @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -100,15 +280,6 @@ class Connection(ConnectionBase): return controlpersist, controlpath - @staticmethod - def _split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. - """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -157,7 +328,7 @@ class Connection(ConnectionBase): # Next, we add [ssh_connection]ssh_args from ansible.cfg. if self._play_context.ssh_args: - args = self._split_args(self._play_context.ssh_args) + args = split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -210,7 +381,7 @@ class Connection(ConnectionBase): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = self._split_args(attr) + args = split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 7b2a3794efc..e460708f906 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ import zlib from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,6 +39,7 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -139,6 +140,33 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) + if C.HOST_KEY_CHECKING and not host.has_hostkey: + # caveat here, 
regarding with loops. It is assumed that none of the connection + # related variables would contain '{{item}}' as it would cause some really + # weird loops. As is, if someone did something odd like that they would need + # to disable host key checking + templar = Templar(loader=self._loader, variables=task_vars) + temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) + temp_pc.post_validate(templar) + if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': + try: + # get the ssh connection plugin's class, and use its builtin + # static method to fetch and save the key to the known_hosts file + ssh_conn = connection_loader.get('ssh', class_only=True) + ssh_conn.fetch_and_store_key(host, temp_pc) + except AnsibleConnectionFailure as e: + # if that fails, add the host to the list of unreachable + # hosts and send the appropriate callback + self._tqm._unreachable_hosts[host.name] = True + self._tqm._stats.increment('dark', host.name) + tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) + self._tqm.send_callback('v2_runner_on_unreachable', tr) + return + + # finally, we set the has_hostkey flag to true for this + # host so we can skip it quickly in the future + host.has_hostkey = True + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py new file mode 100644 index 00000000000..6f6b405640e --- /dev/null +++ b/lib/ansible/utils/connection.py @@ -0,0 +1,50 @@ +# (c) 2015, Ansible, Inc. <support@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import subprocess +import sys + + +__all__ = ['get_smart_connection_type'] + +def get_smart_connection_type(play_context): + ''' + Uses the ssh command with the ControlPersist option while checking + for an error to determine if we should use ssh or paramiko. Also + may take other factors into account. 
+ ''' + + conn_type = 'ssh' + if sys.platform.startswith('darwin') and play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" + + return conn_type From d7f2f606e179cf0df4d308a0055b4ad62207b47c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 16 Dec 2015 21:49:33 -0500 Subject: [PATCH 0164/1113] Add has_hostkey to mock objects to fix broken unit tests --- test/units/plugins/strategies/test_strategy_base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 53e243f926b..8d1a1e8adab 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -76,6 +76,7 @@ class TestStrategyBase(unittest.TestCase): for i in range(0, 5): mock_host = MagicMock() mock_host.name = "host%02d" % (i+1) + mock_host.has_hostkey = True mock_hosts.append(mock_host) mock_inventory = MagicMock() @@ -111,6 +112,7 @@ class TestStrategyBase(unittest.TestCase): fake_loader = DictDataLoader() mock_var_manager = MagicMock() mock_host = MagicMock() + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_options = MagicMock() mock_options.module_path = None @@ -171,6 +173,7 @@ class TestStrategyBase(unittest.TestCase): mock_host = MagicMock() mock_host.name = 'test01' mock_host.vars = dict() + mock_host.has_hostkey = True mock_task = MagicMock() mock_task._role = None @@ -347,6 +350,7 @@ class TestStrategyBase(unittest.TestCase): mock_host = MagicMock(Host) mock_host.name = "test01" + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] From d9c74536be63cedc3dd1711c73844827990e898d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 09:44:40 -0500 Subject: [PATCH 0165/1113] Fix handling of environment inheritence, and template each inherited env Environments were not being templated individually, so a variable environment value was causing the exception regarding dicts to be hit. Also, environments as inherited were coming through with the tasks listed first, followed by the parents, so they were being merged backwards. Reversing the list of environments fixed this. 
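A small sketch of the merge-order problem described above: the inherited environment list arrives task-first, so merging it as-is lets parent (block/play) values overwrite the task's, while reversing the list first means the task's values win. The dictionaries below are hypothetical:

    # Inherited order is the task's environment first, then its parents:
    environments = [
        {'PATH': '/opt/task/bin'},                # set on the task
        {'PATH': '/opt/block/bin', 'LANG': 'C'},  # inherited from the block/play
    ]

    final_environment = {}
    for environment in reversed(environments):    # parents first, task last
        final_environment.update(environment)

    print(final_environment)  # {'PATH': '/opt/task/bin', 'LANG': 'C'} -- the task value wins
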
--- lib/ansible/plugins/action/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 254bab476bb..e9b18651d66 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -151,14 +151,19 @@ class ActionBase(with_metaclass(ABCMeta, object)): if not isinstance(environments, list): environments = [ environments ] + # the environments as inherited need to be reversed, to make + # sure we merge in the parent's values first so those in the + # block then task 'win' in precedence + environments.reverse() for environment in environments: if environment is None: continue - if not isinstance(environment, dict): - raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment))) + temp_environment = self._templar.template(environment) + if not isinstance(temp_environment, dict): + raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment))) # very deliberately using update here instead of combine_vars, as # these environment settings should not need to merge sub-dicts - final_environment.update(environment) + final_environment.update(temp_environment) final_environment = self._templar.template(final_environment) return self._connection._shell.env_prefix(**final_environment) From dd3d04e96ab30bb0df89b5e3ab1ac9a9d91d5841 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 10:31:14 -0500 Subject: [PATCH 0166/1113] Adding pip install of virtualenv to test deps integration role --- .../roles/ansible_test_deps/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index f71128921d9..5f75085d920 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -10,6 +10,9 @@ ignore_errors: true when: ansible_os_family == 'Debian' +- name: Install virtualenv + pip: name=virtualenv state=present + - name: Install RH epel yum: name="epel-release" state=installed sudo: true From 0b1ad8d4905fa83eddbc08e2a3dd395aa99b8aed Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 10:41:58 -0500 Subject: [PATCH 0167/1113] Switch virtualenv dep installation from pip to package manager --- .../roles/ansible_test_deps/tasks/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 5f75085d920..c9cb256a35c 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -11,7 +11,12 @@ when: ansible_os_family == 'Debian' - name: Install virtualenv - pip: name=virtualenv state=present + yum: name=python-virtualenv state=installed + when: ansible_os_family == 'RedHat' + +- name: Install virtualenv + apt: name=python-virtualenv state=installed + when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From 
cf3d503f790ddf7ba74bc768bd2faad7a550f5ee Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 11:00:54 -0500 Subject: [PATCH 0168/1113] Moving apt cache update to top to ensure cache is updated before deps installed --- .../roles/ansible_test_deps/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c9cb256a35c..c2fc955a164 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -1,5 +1,8 @@ --- +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + - name: Install sudo yum: name=sudo state=installed ignore_errors: true @@ -42,9 +45,6 @@ - libselinux-python when: ansible_os_family == 'RedHat' -- apt: update_cache=yes - when: ansible_os_family == 'Debian' - - name: Install Debian ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From 26bbabcfba637e17b36bb20d064c390cf0461e4d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 11:15:06 -0500 Subject: [PATCH 0169/1113] Consolidating package lines for virtualenv install in test deps integration --- .../roles/ansible_test_deps/tasks/main.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c2fc955a164..ac133730ec5 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -13,14 +13,6 @@ ignore_errors: true when: ansible_os_family == 'Debian' -- name: Install virtualenv - yum: name=python-virtualenv state=installed - when: ansible_os_family == 'RedHat' - -- name: Install virtualenv - apt: name=python-virtualenv state=installed - when: ansible_os_family == 'Debian' - - name: Install RH epel yum: name="epel-release" state=installed sudo: true @@ -43,6 +35,7 @@ - gcc - python-devel - libselinux-python + - python-virtualenv when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies @@ -57,6 +50,7 @@ - git - unzip - python-dev + - python-virtualenv when: ansible_os_family == 'Debian' - name: Install ubuntu 12.04 ansible dependencies From 21c127c5813c800204c729d84188f1e6d7bae3e7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 12:06:17 -0500 Subject: [PATCH 0170/1113] Fixing bugs in ssh known_host fetching * If remote_addr is not set in the PlayContext, use the host.address field instead (which is how the action plugin works) Fixes #13581 --- lib/ansible/plugins/connection/ssh.py | 29 +++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1a..c24d1667348 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,11 +60,15 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(play_context): +def get_ssh_opts(host, play_context): # FIXME: caching may 
help here opts_dict = dict() try: - cmd = ['ssh', '-G', play_context.remote_addr] + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + cmd = ['ssh', '-G', remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -137,7 +141,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(play_context, ssh_opts): +def fetch_ssh_host_key(host, play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -146,7 +150,11 @@ def fetch_ssh_host_key(play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - keyscan_cmd.append(play_context.remote_addr) + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + keyscan_cmd.append(remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (stdout, stderr) = p.communicate() @@ -194,8 +202,13 @@ class Connection(ConnectionBase): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(host, play_context) + + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + if not host_in_known_hosts(remote_addr, ssh_opts): display.debug("host %s does not have a known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -204,7 +217,7 @@ class Connection(ConnectionBase): # attempt to fetch the key with ssh-keyscan. More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -229,7 +242,7 @@ class Connection(ConnectionBase): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? 
") From 8db4415e2e95e5993822b4f75e700dd14a928ad9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 17 Dec 2015 12:25:29 -0500 Subject: [PATCH 0171/1113] changed test to use filter for accurate reporting --- test/integration/roles/test_service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index c0e590643c9..8b61d62143a 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -98,7 +98,7 @@ - name: assert that the broken test failed assert: that: - - "broken_enable_result.failed == True" + - "broken_enable_result|failed" - name: remove the test daemon script file: path=/usr/sbin/ansible_test_service state=absent From 586208234cc921acc70fbe1fff211707ceba0c7a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 12:42:53 -0500 Subject: [PATCH 0172/1113] Revert "Fixing bugs in ssh known_host fetching" This reverts commit 21c127c5813c800204c729d84188f1e6d7bae3e7. --- lib/ansible/plugins/connection/ssh.py | 29 ++++++++------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index c24d1667348..cce29824e1a 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,15 +60,11 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(host, play_context): +def get_ssh_opts(play_context): # FIXME: caching may help here opts_dict = dict() try: - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - cmd = ['ssh', '-G', remote_addr] + cmd = ['ssh', '-G', play_context.remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -141,7 +137,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(host, play_context, ssh_opts): +def fetch_ssh_host_key(play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -150,11 +146,7 @@ def fetch_ssh_host_key(host, play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - keyscan_cmd.append(remote_addr) + keyscan_cmd.append(play_context.remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (stdout, stderr) = p.communicate() @@ -202,13 +194,8 @@ class Connection(ConnectionBase): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(host, play_context) - - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - if not host_in_known_hosts(remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): display.debug("host %s does not have a known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -217,7 +204,7 @@ class Connection(ConnectionBase): # attempt to fetch the key with ssh-keyscan. 
More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -242,7 +229,7 @@ class Connection(ConnectionBase): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") From e5462194261c7b55ccdf41adc4525dc86a1a34c1 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 12:43:36 -0500 Subject: [PATCH 0173/1113] Revert "Enable host_key checking at the strategy level" This reverts commit 1a6d660d7e285cceec474952a33af4d8dffd0a8d. --- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 ++------------------- lib/ansible/plugins/strategy/__init__.py | 30 +--- lib/ansible/utils/connection.py | 50 ------ 6 files changed, 33 insertions(+), 273 deletions(-) delete mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2623bc775b2..5d7430fad25 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,7 +32,6 @@ from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVar from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -565,7 +564,21 @@ class TaskExecutor: conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = get_smart_connection_type(self._play_context) + conn_type = 'ssh' + if sys.platform.startswith('darwin') and self._play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not connection: diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 70f9f57b5f1..6263dcbc80d 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,7 +57,6 @@ class Host: 
name=self.name, vars=self.vars.copy(), address=self.address, - has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -66,11 +65,10 @@ class Host: def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self.has_hostkey = data.get('has_hostkey', False) - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -91,7 +89,6 @@ class Host: self._gathered_facts = False self._uuid = uuid.uuid4() - self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 7fc19c8c195..06616bac4ca 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ __metaclass__ = type import fcntl import gettext import os - from abc import ABCMeta, abstractmethod, abstractproperty -from functools import wraps +from functools import wraps from ansible.compat.six import with_metaclass + from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,4 +233,3 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) - diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1a..a2abcf20aee 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,12 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.compat.six import text_type - -import base64 import fcntl -import hmac -import operator import os import pipes import pty @@ -33,13 +28,9 @@ import shlex import subprocess import time -from hashlib import md5, sha1, sha256 - from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase -from ansible.utils.boolean import boolean -from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -50,128 +41,7 @@ except ImportError: display = Display() SSHPASS_AVAILABLE = None -HASHED_KEY_MAGIC = "|1|" -def split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. 
- """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - -def get_ssh_opts(play_context): - # FIXME: caching may help here - opts_dict = dict() - try: - cmd = ['ssh', '-G', play_context.remote_addr] - res = subprocess.check_output(cmd) - for line in res.split('\n'): - if ' ' in line: - (key, val) = line.split(' ', 1) - else: - key = line - val = '' - opts_dict[key.lower()] = val - - # next, we manually override any options that are being - # set via ssh_args or due to the fact that `ssh -G` doesn't - # actually use the options set via -o - for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: - attr = getattr(play_context, opt, None) - if attr is not None: - args = split_args(attr) - for arg in args: - if '=' in arg: - (key, val) = arg.split('=', 1) - opts_dict[key.lower()] = val - - return opts_dict - except subprocess.CalledProcessError: - return dict() - -def host_in_known_hosts(host, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - - host_file_list = [] - host_file_list.append(user_host_file) - host_file_list.append("/etc/ssh/ssh_known_hosts") - host_file_list.append("/etc/ssh/ssh_known_hosts2") - - hfiles_not_found = 0 - for hf in host_file_list: - if not os.path.exists(hf): - continue - try: - host_fh = open(hf) - except (OSError, IOError) as e: - continue - else: - data = host_fh.read() - host_fh.close() - - for line in data.split("\n"): - line = line.strip() - if line is None or " " not in line: - continue - tokens = line.split() - if not tokens: - continue - if tokens[0].find(HASHED_KEY_MAGIC) == 0: - # this is a hashed known host entry - try: - (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) - hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) - hash.update(host) - if hash.digest() == kn_host.decode('base64'): - return True - except: - # invalid hashed host key, skip it - continue - else: - # standard host file entry - if host in tokens[0]: - return True - - return False - -def fetch_ssh_host_key(play_context, ssh_opts): - keyscan_cmd = ['ssh-keyscan'] - - if play_context.port: - keyscan_cmd.extend(['-p', text_type(play_context.port)]) - - if boolean(ssh_opts.get('hashknownhosts', 'no')): - keyscan_cmd.append('-H') - - keyscan_cmd.append(play_context.remote_addr) - - p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) - (stdout, stderr) = p.communicate() - if stdout == '': - raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." 
% stderr) - else: - return stdout - -def add_host_key(host_key, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - user_ssh_dir = os.path.dirname(user_known_hosts) - - if not os.path.exists(user_ssh_dir): - raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) - elif not os.path.isdir(user_ssh_dir): - raise AnsibleError("%s is not a directory" % user_ssh_dir) - - try: - display.vv("adding to known_hosts file: %s" % user_known_hosts) - with open(user_known_hosts, 'a') as f: - f.write(host_key) - except (OSError, IOError) as e: - raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -192,56 +62,6 @@ class Connection(ConnectionBase): def _connect(self): return self - @staticmethod - def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): - display.debug("host %s does not have a known host key, fetching it" % host) - - # build the list of valid host key types, for use later as we scan for keys. - # we also use this to determine the most preferred key when multiple keys are available - valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] - - # attempt to fetch the key with ssh-keyscan. More than one key may be - # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') - host_keys = dict() - for host_key in host_key_data: - (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) - key_type = key_type.lower() - if key_type in valid_host_key_types and key_type not in host_keys: - host_keys[key_type.lower()] = host_key - - if len(host_keys) == 0: - raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") - - # now we determine the preferred key by sorting the above dict on the - # index of the key type in the valid keys list - preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] - - # shamelessly copied from here: - # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 - # (which shamelessly copied it from somewhere else...) - (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) - decoded_key = key_hash.decode('base64') - fp_plain = md5(decoded_key).hexdigest() - key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) - - # prompt the user to add the key - # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) - display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) - display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) - response = display.prompt("Are you sure you want to continue connecting (yes/no)? 
") - display.display("") - if boolean(response): - add_host_key(host_key, ssh_opts) - return True - else: - raise AnsibleConnectionFailure("Host key validation failed.") - - return False - @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -280,6 +100,15 @@ class Connection(ConnectionBase): return controlpersist, controlpath + @staticmethod + def _split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -328,7 +157,7 @@ class Connection(ConnectionBase): # Next, we add [ssh_connection]ssh_args from ansible.cfg. if self._play_context.ssh_args: - args = split_args(self._play_context.ssh_args) + args = self._split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -381,7 +210,7 @@ class Connection(ConnectionBase): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = split_args(attr) + args = self._split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index e460708f906..7b2a3794efc 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ import zlib from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,7 +39,6 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -140,33 +139,6 @@ class StrategyBase: display.debug("entering _queue_task() for %s/%s" % (host, task)) - if C.HOST_KEY_CHECKING and not host.has_hostkey: - # caveat here, regarding with loops. It is assumed that none of the connection - # related variables would contain '{{item}}' as it would cause some really - # weird loops. 
As is, if someone did something odd like that they would need - # to disable host key checking - templar = Templar(loader=self._loader, variables=task_vars) - temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) - temp_pc.post_validate(templar) - if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': - try: - # get the ssh connection plugin's class, and use its builtin - # static method to fetch and save the key to the known_hosts file - ssh_conn = connection_loader.get('ssh', class_only=True) - ssh_conn.fetch_and_store_key(host, temp_pc) - except AnsibleConnectionFailure as e: - # if that fails, add the host to the list of unreachable - # hosts and send the appropriate callback - self._tqm._unreachable_hosts[host.name] = True - self._tqm._stats.increment('dark', host.name) - tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) - self._tqm.send_callback('v2_runner_on_unreachable', tr) - return - - # finally, we set the has_hostkey flag to true for this - # host so we can skip it quickly in the future - host.has_hostkey = True - task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py deleted file mode 100644 index 6f6b405640e..00000000000 --- a/lib/ansible/utils/connection.py +++ /dev/null @@ -1,50 +0,0 @@ -# (c) 2015, Ansible, Inc. <support@ansible.com> -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import subprocess -import sys - - -__all__ = ['get_smart_connection_type'] - -def get_smart_connection_type(play_context): - ''' - Uses the ssh command with the ControlPersist option while checking - for an error to determine if we should use ssh or paramiko. Also - may take other factors into account. 
- ''' - - conn_type = 'ssh' - if sys.platform.startswith('darwin') and play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" - - return conn_type From 1b5e7ce0253c896f5166b5ffd1c2614090cc75a1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 10:23:02 -0800 Subject: [PATCH 0174/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 16a3bdaa7da..c75c0003697 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d +Subproject commit c75c0003697d00f52cedb68d4c1b05b7e95991e0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8ec4f95ffd6..06bdec0cac8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf +Subproject commit 06bdec0cac86ef2339e0b4d8a4616ee24619956f From ce1febe28bb538c9d6db59449caf4da9dcf23f7e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 11:25:45 -0800 Subject: [PATCH 0175/1113] debug line needs var not msg --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 09ee34277a0..640c987790f 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -78,7 +78,7 @@ # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' -- debug: msg=get_url_result +- debug: var=get_url_result - name: Assert that SNI works with this python version assert: that: From bad1c173b87a7b68fc0ae79b35376fc31e8cc5d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 11:36:36 -0800 Subject: [PATCH 0176/1113] Update core submodule for mysql_db fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c75c0003697..b4a3fdd4933 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c75c0003697d00f52cedb68d4c1b05b7e95991e0 +Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 From 8c6f56f982fce50d5b030928e425740a30d4f86c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 11:46:26 -0800 Subject: [PATCH 0177/1113] kennetreitz.org times out but www.kennethreitz.org is fine --- test/integration/roles/test_lookups/tasks/main.yml | 6 +++--- test/integration/roles/test_uri/tasks/main.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/roles/test_lookups/tasks/main.yml 
b/test/integration/roles/test_lookups/tasks/main.yml index 5ca29e27c1e..3c5e066ee34 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -177,7 +177,7 @@ - name: Test that retrieving a url with invalid cert fails set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/') }}" ignore_errors: True register: url_invalid_cert @@ -188,9 +188,9 @@ - name: Test that retrieving a url with invalid cert with validate_certs=False works set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/', validate_certs=False) }}" register: url_no_validate_cert - assert: that: - - "'kennethreitz.org' in web_data" + - "'www.kennethreitz.org' in web_data" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 7300578982d..18229e6b7cf 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -94,7 +94,7 @@ - name: test https fetch to a site with mismatched hostname and certificate uri: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -117,7 +117,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 5929ffc7c3b79b830edeebdb8542b53c3c0a15b3 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 16:01:56 -0500 Subject: [PATCH 0178/1113] Make --list-tasks respect tags Also makes the output closer to the appearance of v1 Fixes #13260 --- lib/ansible/cli/playbook.py | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index a9c0ed018dc..e51d5d3993b 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -30,6 +30,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader +from ansible.playbook.play_context import PlayContext from ansible.utils.vars import load_extra_vars from ansible.vars import VariableManager @@ -152,18 +153,10 @@ class PlaybookCLI(CLI): for p in results: display.display('\nplaybook: %s' % p['playbook']) - i = 1 - for play in p['plays']: - if play.name: - playname = play.name - else: - playname = '#' + str(i) - - msg = "\n PLAY: %s" % (playname) - mytags = set() - if self.options.listtags and play.tags: - mytags = mytags.union(set(play.tags)) - msg += ' TAGS: [%s]' % (','.join(mytags)) + for idx, play in enumerate(p['plays']): + msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) + mytags = set(play.tags) + msg += ' TAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -176,20 +169,21 @@ class PlaybookCLI(CLI): if self.options.listtags or self.options.listtasks: taskmsg = ' tasks:' + all_vars = variable_manager.get_vars(loader=loader, play=play) + play_context = PlayContext(play=play, options=self.options) for block in play.compile(): + 
block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue - j = 1 for task in block.block: - taskmsg += "\n %s" % task - if self.options.listtags and task.tags: - taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) - j = j + 1 + if task.action == 'meta': + continue + taskmsg += "\n %s" % task.get_name() + taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) display.display(taskmsg) - i = i + 1 return 0 else: return results From d4ffc96c8039e5a79baf23be173d03c2e4c8565f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 16:30:23 -0500 Subject: [PATCH 0179/1113] Further tweaks to the output format of list tasks/tags --- lib/ansible/cli/playbook.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index e51d5d3993b..d307abdfcc1 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -156,7 +156,7 @@ class PlaybookCLI(CLI): for idx, play in enumerate(p['plays']): msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) - msg += ' TAGS: [%s]' % (','.join(mytags)) + msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -166,8 +166,11 @@ class PlaybookCLI(CLI): display.display(msg) + all_tags = set() if self.options.listtags or self.options.listtasks: - taskmsg = ' tasks:' + taskmsg = '' + if self.options.listtasks: + taskmsg = ' tasks:\n' all_vars = variable_manager.get_vars(loader=loader, play=play) play_context = PlayContext(play=play, options=self.options) @@ -179,8 +182,18 @@ class PlaybookCLI(CLI): for task in block.block: if task.action == 'meta': continue - taskmsg += "\n %s" % task.get_name() - taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) + + all_tags.update(task.tags) + if self.options.listtasks: + cur_tags = list(mytags.union(set(task.tags))) + cur_tags.sort() + taskmsg += " %s" % task.action + taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) + + if self.options.listtags: + cur_tags = list(mytags.union(all_tags)) + cur_tags.sort() + taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags) display.display(taskmsg) From 4ba7158282f148c90c72f824d6ebcd1a9953b580 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 16:33:23 -0500 Subject: [PATCH 0180/1113] Fixing a mistake from tweaking list stuff too much Use the action only if the task name is not set --- lib/ansible/cli/playbook.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index d307abdfcc1..dfd06b19208 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -187,7 +187,10 @@ class PlaybookCLI(CLI): if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() - taskmsg += " %s" % task.action + if task.name: + taskmsg += " %s" % task.get_name() + else: + taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) if self.options.listtags: From 3057fc1753eff42fb073ae866734cb9127cbd25a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 13:46:15 -0800 Subject: [PATCH 0181/1113] Update submodule ref for mysql_user fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 
b4a3fdd4933..9366dfb63e5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 +Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 From c5eda277ac6ca50cf593a724a368ad973d1a3935 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 17:51:42 -0800 Subject: [PATCH 0182/1113] Fix get_url tests in light of distros backporting SNI support --- .../roles/test_get_url/tasks/main.yml | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 640c987790f..d7885f0905e 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -16,6 +16,21 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +- name: Determine if python looks like it will support modern ssl features like SNI + command: python -c 'from ssl import SSLContext' + ignore_errors: True + register: python_test + +- name: Set python_has_sslcontext if we have it + set_fact: + python_has_ssl_context: True + when: python_test.rc == 0 + +- name: Set python_has_sslcontext False if we don't have it + set_fact: + python_has_ssl_context: False + when: python_test.rc != 0 + - name: test https fetch get_url: url="https://raw.githubusercontent.com/ansible/ansible/devel/README.md" dest={{output_dir}}/get_url.txt force=yes register: result @@ -74,7 +89,7 @@ - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' @@ -84,11 +99,11 @@ that: - 'data_result.rc == 0' - '"failed" not in get_url_result' - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If the client doesn't support SNI then get_url should have failed with a certificate mismatch - name: Assert that hostname verification failed because SNI is not supported on this version of python assert: that: - 'get_url_result["failed"]' - when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}" + when: "{{ not python_has_ssl_context }}" From 12c0bb9414224517c6b15ec1d58aedd45d40703d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 20:52:49 -0500 Subject: [PATCH 0183/1113] Use --source instead of -e for awk in integration Makefile --- test/integration/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index a2d91f96f1a..dcd30f0b836 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -193,5 +193,5 @@ test_lookup_paths: no_log: # This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug - [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk -e 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] + [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { 
logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] From 1f3eec293bad4add2e52fbc52a7bbdcc912c3ab8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 20:06:53 -0800 Subject: [PATCH 0184/1113] Install an updated version of pycrypto on Ubuntu12 from pip --- .../roles/ansible_test_deps/tasks/main.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index ac133730ec5..0b9e58c6598 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -67,6 +67,14 @@ - rubygems-integration when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" +# Not sure why CentOS 6 is working without this.... +#- name: Install Red Hat 6 ansible dependencies +# yum: name="{{ item }}" state=installed +# sudo: true +# with_items: +# - python-crypto2.6 +# when: ansible_distribution in ('CentOS', 'RedHat') and ansible_distribution_major_version == "6" + - name: Install ansible pip deps sudo: true pip: name="{{ item }}" @@ -75,6 +83,13 @@ - Jinja2 - paramiko +- name: Install ubuntu 12.04 ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - pycrypto + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + - name: Remove tty sudo requirement sudo: true lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" From 3143b352c53e2beeecec996d4ca80fa7a4293f93 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Dec 2015 23:07:28 -0500 Subject: [PATCH 0185/1113] Add ca-certificates update to the integration deps playbook --- .../roles/ansible_test_deps/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 0b9e58c6598..85fad6a7fbb 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -53,6 +53,10 @@ - python-virtualenv when: ansible_os_family == 'Debian' +- name: update ca certificates + yum: name=ca-certificates state=latest + when: ansible_os_family == 'RedHat' + - name: Install ubuntu 12.04 ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From a391d6f89ab906d585e623f58789b39fb0797faf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Dec 2015 20:09:48 -0800 Subject: [PATCH 0186/1113] Add state=latest to pip install of pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 85fad6a7fbb..897a4e54edb 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -89,7 +89,7 @@ - name: Install ubuntu 12.04 ansible pip deps 
sudo: true - pip: name="{{ item }}" + pip: name="{{ item }}" state=latest with_items: - pycrypto when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" From 44e30e49dd4b678ff21d308d0e8b00b769de75e1 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 07:47:23 -0500 Subject: [PATCH 0187/1113] Add awk to integration test deps list --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 897a4e54edb..25b19d040e8 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -33,6 +33,7 @@ - openssl - make - gcc + - gawk - python-devel - libselinux-python - python-virtualenv when: ansible_os_family == 'RedHat' @@ -49,6 +50,7 @@ - mercurial - git - unzip + - gawk - python-dev - python-virtualenv when: ansible_os_family == 'Debian' From 1debc2da44e05282fea216e4b6e14e83d50bb4ea Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 10:34:27 -0500 Subject: [PATCH 0188/1113] Do a full yum update to make sure packages are latest version For the deps setup of integration tests, as we sometimes see odd errors we can't reproduce, which may be related to slightly out of date package dependencies. --- .../roles/ansible_test_deps/tasks/main.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 25b19d040e8..17198cdc41f 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,8 +55,12 @@ - python-virtualenv when: ansible_os_family == 'Debian' -- name: update ca certificates - yum: name=ca-certificates state=latest +#- name: update ca certificates +# yum: name=ca-certificates state=latest +# when: ansible_os_family == 'RedHat' + +- name: update all rpm packages + yum: name=* state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From a3dcb910b8b8ad1c1ff65c31102cccd68ed31bf9 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 10:58:55 -0500 Subject: [PATCH 0189/1113] Fixing bugs with {changed,failed}_when and until with registered vars * Saving of the registered variable was occurring after the tests for changed/failed_when. * Each of the above fields and until were being post_validated too early, so variables which were not defined at that time were causing task failures.
Fixes #13591 --- lib/ansible/executor/task_executor.py | 11 +++++------ lib/ansible/playbook/task.py | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad25..b0a5157a525 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -387,7 +387,6 @@ class TaskExecutor: # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions - #vars_copy = variables.copy() vars_copy = variables.copy() display.debug("starting attempt loop") @@ -404,6 +403,11 @@ class TaskExecutor: return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") + # update the local copy of vars with the registered value, if specified, + # or any facts which may have been generated by the module execution + if self._task.register: + vars_copy[self._task.register] = result + if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result @@ -433,11 +437,6 @@ class TaskExecutor: return failed_when_result return False - # update the local copy of vars with the registered value, if specified, - # or any facts which may have been generated by the module execution - if self._task.register: - vars_copy[self._task.register] = result - if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39c..825ee502691 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -260,6 +260,27 @@ class Task(Base, Conditional, Taggable, Become): break return templar.template(value, convert_bare=True) + def _post_validate_changed_when(self, attr, value, templar): + ''' + changed_when is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + + def _post_validate_failed_when(self, attr, value, templar): + ''' + failed_when is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + + def _post_validate_until(self, attr, value, templar): + ''' + until is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. 
+ ''' + return value + def get_vars(self): all_vars = dict() if self._block: From f2364ecf5f9abcb11112dc7fe7c7eaffb6703bd1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 08:10:57 -0800 Subject: [PATCH 0190/1113] Add a Fedora latest host into the mix --- test/utils/ansible-playbook_integration_runner/main.yml | 7 ++++++- .../roles/ansible_test_deps/tasks/main.yml | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 5d15541490f..9bcda9c71ec 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -22,7 +22,12 @@ image: "ami-96a818fe" ssh_user: "centos" platform: "centos-7-x86_64" - + - distribution: "Fedora" + version: "23" + image: "ami-518bfb3b" + ssh_user: "fedora" + platform: "fedora-23-x86_64" + tasks: - debug: var=ansible_version - include: ec2.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 17198cdc41f..16bdde79a05 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -16,10 +16,10 @@ - name: Install RH epel yum: name="epel-release" state=installed sudo: true - when: ansible_os_family == 'RedHat' + when: ansible_distribution in ('CentOS', 'RedHat') - name: Install RH ansible dependencies - yum: name="{{ item }}" state=installed + package: name="{{ item }}" state=installed sudo: true with_items: - python-pip From 0c154e81f055e07c78acedc8ac310a8011ff8274 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 11:30:14 -0500 Subject: [PATCH 0191/1113] Make integration tests run in parallel with async --- .../roles/run_integration/tasks/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2114567d152..980d4a4d32b 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,11 +10,21 @@ register: results - shell: ". 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + async: 3600 + poll: 0 + register: async_test_results sudo: true environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" args: chdir: "{{ results.stdout }}/ansible" + +- name: poll for test results + async_status: + jid: "{{async_test_results.ansible_job_id}}" register: test_results + until: test_results.finished + retries: 360 + wait: 10 ignore_errors: true From 73a0153b8e3e26ac095e140f6ffa6f8a1d756ff6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 12:44:57 -0500 Subject: [PATCH 0192/1113] Fix typo in integration test runner role --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 980d4a4d32b..3eba8285443 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -26,5 +26,5 @@ register: test_results until: test_results.finished retries: 360 - wait: 10 + delay: 10 ignore_errors: true From 5d798c2725475b045fb06b46cba08c39bfcfeda8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Dec 2015 12:14:03 -0500 Subject: [PATCH 0193/1113] added missing features to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 005171ec9a9..0a5e7e2b7c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -371,6 +371,8 @@ allowed in future versions: explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. +* random cows are more random +* when: now gets the registered var after the first iteration, making it possible to break out of item loops * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: ``` From 5dbd7c18a1011e5bc922731574815c22a80d5bc6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Dec 2015 13:57:58 -0500 Subject: [PATCH 0194/1113] added note about add_hosts --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a5e7e2b7c1..17180993a2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -370,6 +370,7 @@ allowed in future versions: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. 
+* add_hosts is much stricter about host name and will prevent invalid names from being added. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * random cows are more random * when: now gets the registered var after the first iteration, making it possible to break out of item loops From 1cc83dd0d968c264c3da4982aa2a658d2e4aeb51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 11:50:06 -0800 Subject: [PATCH 0195/1113] Make tests that use kennethreitz retry. --- test/integration/roles/test_get_url/tasks/main.yml | 9 +++++++++ test/integration/roles/test_uri/tasks/main.yml | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index d7885f0905e..cbf3b345f18 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -47,6 +47,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -65,6 +71,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 18229e6b7cf..9ce05938b62 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -98,6 +98,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... 
also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -121,6 +127,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" From 02f65eaa805f39a15e35a813bcd6a1fdc24ade8c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 14:59:05 -0500 Subject: [PATCH 0196/1113] Make integration runner ec2 add_hosts use valid host names --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 59e15f0da1a..d4740d95708 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -30,7 +30,7 @@ - name: Add hosts group temporary inventory group with pem path add_host: - name: "{{ item.1.platform }} {{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" groups: dynamic_hosts ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" ansible_ssh_private_key_file: '{{ pem_path }}' From 0823a2c16f923bd950399dd879b5440356cb8411 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Dec 2015 15:33:44 -0500 Subject: [PATCH 0197/1113] Removing update all for test deps, it didn't fix the problem --- .../roles/ansible_test_deps/tasks/main.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 16bdde79a05..234eb70f92a 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,12 +55,8 @@ - python-virtualenv when: ansible_os_family == 'Debian' -#- name: update ca certificates -# yum: name=ca-certificates state=latest -# when: ansible_os_family == 'RedHat' - -- name: update all rpm packages - yum: name=* state=latest +- name: update ca certificates + yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies From 68fe3d856f3a58d4cf84053a803bb5e286d61773 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 14:04:51 -0800 Subject: [PATCH 0198/1113] Fedora 23 needs to have python2 packages installed --- test/utils/ansible-playbook_integration_runner/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 9bcda9c71ec..8683ffd5440 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -33,6 +33,15 @@ - include: ec2.yml when: groups['dynamic_hosts'] is not defined +# Have to hardcode these per-slave. We can't even run setup yet so we can't +# introspect what they have. 
+- hosts: dynamic_hosts + sudo: true + tasks: + - name: Install packages that let setup and package manager modules run + raw: dnf install -y python2 python2-dnf libselinux-python + when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + - hosts: dynamic_hosts sudo: true vars: From ec60bfbb3f0b88d37b91a2deae2bf6b79a1091dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 14:36:17 -0800 Subject: [PATCH 0199/1113] Ubuntu images with hvm ssd --- test/utils/ansible-playbook_integration_runner/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8683ffd5440..b8942172bce 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -4,12 +4,12 @@ slaves: - distribution: "Ubuntu" version: "12.04" - image: "ami-2ccc7a44" + image: "ami-309ddf5a" ssh_user: "ubuntu" platform: "ubuntu-12.04-x86_64" - distribution: "Ubuntu" version: "14.04" - image: "ami-9a562df2" + image: "ami-d06632ba" ssh_user: "ubuntu" platform: "ubuntu-14.04-x86_64" - distribution: "CentOS" From 26e5bcdb39517e8247e59ac038db7dd641cbb7fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 14:38:54 -0800 Subject: [PATCH 0200/1113] Bugfix the fedora 23 install task --- test/utils/ansible-playbook_integration_runner/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index b8942172bce..e82e0dea3f2 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -37,10 +37,11 @@ # introspect what they have. 
- hosts: dynamic_hosts sudo: true + gather_facts: False tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" - hosts: dynamic_hosts sudo: true From 78dde62710bd63f931bce21cf4352994a5a36873 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 15:14:38 -0800 Subject: [PATCH 0201/1113] What is going on here --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d95708..c6971486ec3 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,6 +28,8 @@ - name: Wait a little longer for centos pause: seconds=20 +- debug: var=ec2.results + - name: Add hosts group temporary inventory group with pem path add_host: name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" From f7ed33378e234542950b992499e848a8284cc2fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 15:42:41 -0800 Subject: [PATCH 0202/1113] Fix the fedora host detection --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 -- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index c6971486ec3..d4740d95708 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,8 +28,6 @@ - name: Wait a little longer for centos pause: seconds=20 -- debug: var=ec2.results - - name: Add hosts group temporary inventory group with pem path add_host: name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index e82e0dea3f2..4aa17d11c1f 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -41,7 +41,7 @@ tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" + when: "'fedora-23' in '{{ inventory_hostname }}'" - hosts: dynamic_hosts sudo: true From 3197eeaaa8d49c862fcb98165bcb254c74e10f4e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 22:16:49 -0800 Subject: [PATCH 0203/1113] update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9366dfb63e5..15c1c0cca79 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 +Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 06bdec0cac8..c6829752d85 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 
06bdec0cac86ef2339e0b4d8a4616ee24619956f +Subproject commit c6829752d852398c255704cd5d7faa54342e143e From 07a00593066cb439f0b9aea4e815259cc8a2ec75 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Dec 2015 22:23:25 -0800 Subject: [PATCH 0204/1113] update submodule ref for doc fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 15c1c0cca79..fcb3397df79 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc +Subproject commit fcb3397df7944ff15ea698b5717c06e8fc7d43ba From d2ad17e88f5f1bc2ed7282ec4322aaffd869834a Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Sat, 19 Dec 2015 00:08:49 -0800 Subject: [PATCH 0205/1113] Fixed import typo for memcache module in tests. The typo caused the test for the memcached cache plugin to be skipped even when the necessary memcache python module was installed. --- test/units/plugins/cache/test_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index 0547ba55bf0..cd82e1ef2c8 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -26,7 +26,7 @@ from ansible.plugins.cache.memory import CacheModule as MemoryCache HAVE_MEMCACHED = True try: - import memcached + import memcache except ImportError: HAVE_MEMCACHED = False else: From 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 19 Dec 2015 11:45:59 -0500 Subject: [PATCH 0206/1113] removed invocation info as it is not no_log aware This was added in 1.9 and 2.0 tried to copy, but since it cannot obey no_log restrictions I commented it out. I did not remove as it is still very useful for module invocation debugging. --- lib/ansible/plugins/action/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d66..c363a47ec32 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,13 +82,14 @@ class ActionBase(with_metaclass(ABCMeta, object)): * Module parameters. 
These are stored in self._task.args """ - # store the module invocation details into the results results = {} - if self._task.async == 0: - results['invocation'] = dict( - module_name = self._task.action, - module_args = self._task.args, - ) + # This does not respect no_log set by module args, left here for debugging module invocation + #if self._task.async == 0: + # # store the module invocation details into the results + # results['invocation'] = dict( + # module_name = self._task.action, + # module_args = self._task.args, + # ) return results def _configure_module(self, module_name, module_args, task_vars=None): From c63ae9948543a3f73ae17dc4eecae7b22fb62947 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 10:10:38 -0800 Subject: [PATCH 0207/1113] Make sure that yum is present on redhat family systems (makes things also work on fedora systems where dnf is the default) --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 234eb70f92a..89f7382a1e4 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -37,6 +37,8 @@ - python-devel - libselinux-python - python-virtualenv + - yum + - yum-metadata-parser when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 2936682f004d9d3fc349e31113607636e971b71b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 11:09:20 -0800 Subject: [PATCH 0208/1113] Revert "removed invocation info as it is not no_log aware" This reverts commit 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8. --- lib/ansible/plugins/action/__init__.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index c363a47ec32..e9b18651d66 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,14 +82,13 @@ class ActionBase(with_metaclass(ABCMeta, object)): * Module parameters. These are stored in self._task.args """ + # store the module invocation details into the results results = {} - # This does not respect no_log set by module args, left here for debugging module invocation - #if self._task.async == 0: - # # store the module invocation details into the results - # results['invocation'] = dict( - # module_name = self._task.action, - # module_args = self._task.args, - # ) + if self._task.async == 0: + results['invocation'] = dict( + module_name = self._task.action, + module_args = self._task.args, + ) return results def _configure_module(self, module_name, module_args, task_vars=None): From d32a885e98f9154f5c74afba482b4299a2e2be5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 11:24:59 -0800 Subject: [PATCH 0209/1113] Make return invocation information so that our sanitized copy will take precedence over what the executor knows. 
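The change described above makes the module itself return a sanitized copy of its parameters under the 'invocation' key, so values covered by no_log are masked before anything is printed or registered. From the playbook side the effect looks roughly like the following sketch, assuming a module that flags its password argument as no_log (as the user module does); names are illustrative, not taken from this patch:

    - name: Create a service account without leaking the password into output
      user:
        name: deploy
        password: "{{ vaulted_password }}"   # assumed to come from a vaulted variable
      register: result

    - debug: var=result.invocation
      # parameters the module marks as no_log come back masked rather than echoed verbatim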
--- lib/ansible/module_utils/basic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd61..4870ed096dd 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1431,7 +1431,6 @@ class AnsibleModule(object): self.log(msg, log_args=log_args) - def _set_cwd(self): try: cwd = os.getcwd() @@ -1524,6 +1523,8 @@ class AnsibleModule(object): self.add_path_info(kwargs) if not 'changed' in kwargs: kwargs['changed'] = False + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1534,6 +1535,8 @@ class AnsibleModule(object): self.add_path_info(kwargs) assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) From 51cca87d67823f4edfc4e05bf3e5a4070e494113 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 11:27:16 -0800 Subject: [PATCH 0210/1113] Also need redhat-rpm-config to compile pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 89f7382a1e4..de08126b82d 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -39,6 +39,7 @@ - python-virtualenv - yum - yum-metadata-parser + - redhat-rpm-config when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 8ffc1fa838d7e984f4a99568021660cbbd243550 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 11:31:46 -0800 Subject: [PATCH 0211/1113] Comment to explain why we strip _ansible_notify specially --- lib/ansible/plugins/action/normal.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index bf93fdad2d7..f9b55e1ff57 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -28,11 +28,13 @@ class ActionModule(ActionBase): results = super(ActionModule, self).run(tmp, task_vars) results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) - # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. - for field in ('ansible_notify',): + # + # We don't want modules to determine that running the module fires + # notify handlers. That's for the playbook to decide. 
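The comment added above spells out the rule that modules must not decide which handlers fire; the play does, through notify. A minimal sketch of that contract (play, file and handler names are illustrative only):

    - hosts: webservers
      tasks:
        - name: Deploy configuration
          template:
            src: app.conf.j2          # hypothetical template
            dest: /etc/app.conf
          notify: restart app
      handlers:
        - name: restart app
          service:
            name: app
            state: restarted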
+ for field in ('_ansible_notify',): if field in results: results.pop(field) From 224d5963361deb33107e5f38fd28a4d5197f931e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 11:51:16 -0800 Subject: [PATCH 0212/1113] Remove args from get_name() as we can't tell if any of the args are no_log --- lib/ansible/playbook/task.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 825ee502691..fb757864745 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -107,11 +107,10 @@ class Task(Base, Conditional, Taggable, Become): elif self.name: return self.name else: - flattened_args = self._merge_kv(self.args) if self._role: - return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args) + return "%s : %s" % (self._role.get_name(), self.action) else: - return "%s %s" % (self.action, flattened_args) + return "%s" % (self.action,) def _merge_kv(self, ds): if ds is None: From 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 12:39:48 -0800 Subject: [PATCH 0213/1113] Troubleshooting has reduced us to this --- test/integration/roles/test_get_url/tasks/main.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f18..54debc06d10 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,12 +96,22 @@ register: get_url_result ignore_errors: True +- name: TROUBLESHOOTING + shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html + register: trouble + ignore_errors: True + when: "{{ python_has_ssl_context }}" + +- debug: var=trouble + when: "{{ python_has_ssl_context }}" + +- debug: var=get_url_result + when: "{{ python_has_ssl_context }}" + - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From e66c070e5c0d50f0a90fcd3b73044a6faeef7c81 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 13:00:58 -0800 Subject: [PATCH 0214/1113] Add package module to squash list --- lib/ansible/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 7f74358dd5d..5df9602246a 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -201,7 +201,7 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa # the module takes both, bad things could happen. 
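The squash_actions line changed just below adds the generic package module to the list of modules whose with_items loops are collapsed into a single invocation, so one package-manager transaction installs every item. A rough playbook-level sketch of the kind of loop that gets squashed (package names are illustrative):

    - name: Install build dependencies in a single transaction
      package:
        name: "{{ item }}"
        state: present
      with_items:
        - gcc
        - make
        - git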
# In the future we should probably generalize this even further # (mapping of param: squash field) -DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True) +DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True) # paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True) DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True) From bb2935549f38a83670baadb74041ef98902e0640 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 19 Dec 2015 16:14:56 -0500 Subject: [PATCH 0215/1113] corrected service detection in docker versions now if 1 == bash it falls back into tool detection --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 94a5a11f726..796ebc92bdd 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -555,8 +555,8 @@ class Facts(object): if proc_1 is None: rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) - if proc_1 in ['init', '/sbin/init']: - # many systems return init, so this cannot be trusted + if proc_1 in ['init', '/sbin/init', 'bash']: + # many systems return init, so this cannot be trusted, bash is from docker proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! From e2d9f4e2f272c6010b0c00257aa695c1606e05ab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 15:49:06 -0800 Subject: [PATCH 0216/1113] Fix unittests for return of invocation from fail_json and exit_json --- test/units/module_utils/basic/test_exit_json.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 66610ec3ed3..931447f8ab6 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -56,7 +56,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False)) + self.assertEquals(return_val, dict(changed=False, invocation={})) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +67,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False)) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) def test_fail_json_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -78,13 +78,13 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True)) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) def test_exit_json_proper_changed(self): with 
self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success')) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -94,19 +94,22 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), ) From 3ec0104128103c4c37c117b5ef4548733245bcf4 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 19 Dec 2015 12:49:06 -0500 Subject: [PATCH 0217/1113] Fixing bugs in conditional testing with until and some integration runner tweaks --- lib/ansible/executor/task_executor.py | 8 ++--- lib/ansible/playbook/conditional.py | 34 +++++++++---------- lib/ansible/playbook/task.py | 2 +- .../main.yml | 2 +- .../roles/ansible_test_deps/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 17 +++++----- 6 files changed, 33 insertions(+), 31 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index b0a5157a525..c8b6fa179bc 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -35,7 +35,7 @@ from ansible.template import Templar from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode -from ansible.vars.unsafe_proxy import UnsafeProxy +from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var try: from __main__ import display @@ -406,7 +406,7 @@ class TaskExecutor: # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: - vars_copy[self._task.register] = result + vars_copy[self._task.register] = wrap_var(result.copy()) if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout @@ -453,7 +453,7 @@ class TaskExecutor: if attempt < retries - 1: cond = Conditional(loader=self._loader) - cond.when = self._task.until + cond.when = [ self._task.until ] if 
cond.evaluate_conditional(templar, vars_copy): break @@ -466,7 +466,7 @@ class TaskExecutor: # do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: - variables[self._task.register] = result + variables[self._task.register] = wrap_var(result) if 'ansible_facts' in result: variables.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1d..c8c6a9359ec 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -22,7 +22,7 @@ __metaclass__ = type from jinja2.exceptions import UndefinedError from ansible.compat.six import text_type -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleUndefinedVariable from ansible.playbook.attribute import FieldAttribute from ansible.template import Templar @@ -89,16 +89,22 @@ class Conditional: # make sure the templar is using the variables specifed to this method templar.set_available_variables(variables=all_vars) - conditional = templar.template(conditional) - if not isinstance(conditional, basestring) or conditional == "": - return conditional + try: + conditional = templar.template(conditional) + if not isinstance(conditional, text_type) or conditional == "": + return conditional - # a Jinja2 evaluation that results in something Python can eval! - presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional - conditional = templar.template(presented, fail_on_undefined=False) - - val = conditional.strip() - if val == presented: + # a Jinja2 evaluation that results in something Python can eval! + presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional + conditional = templar.template(presented) + val = conditional.strip() + if val == "True": + return True + elif val == "False": + return False + else: + raise AnsibleError("unable to evaluate conditional: %s" % original) + except (AnsibleUndefinedVariable, UndefinedError) as e: # the templating failed, meaning most likely a # variable was undefined. 
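The reworked evaluation above, together with the existence-test special case it preserves below, is what keeps guards over possibly-missing variables usable in plays. A sketch of the conditionals this code has to handle (task and variable names are illustrative):

    - name: Run an optional step only when its flag variable exists
      command: /usr/local/bin/optional-step      # hypothetical command
      when: optional_flag is defined

    - name: Report cleanly when the variable was never set
      debug: msg="no optional flag supplied"
      when: optional_flag is undefined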
If we happened to be # looking for an undefined variable, return True, @@ -108,11 +114,5 @@ class Conditional: elif "is defined" in original: return False else: - raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented)) - elif val == "True": - return True - elif val == "False": - return False - else: - raise AnsibleError("unable to evaluate conditional: %s" % original) + raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e)) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index fb757864745..62b8cbc999b 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -82,7 +82,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=3) - _until = FieldAttribute(isa='list') + _until = FieldAttribute(isa='string') def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 4aa17d11c1f..27c4ae51b0d 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -74,4 +74,4 @@ - name: Fail shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' - when: "test_results.rc != 0" + when: "'rc' not in test_results or test_results.rc != 0" diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index de08126b82d..d9611497e91 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -59,6 +59,7 @@ when: ansible_os_family == 'Debian' - name: update ca certificates + sudo: true yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 3eba8285443..2d01999dbfd 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -6,10 +6,12 @@ - name: Get ansible source dir sudo: false - shell: "cd ~ && pwd" + shell: "cd ~/ansible && pwd" register: results -- shell: ". hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + args: + chdir: "{{ results.stdout }}" async: 3600 poll: 0 register: async_test_results @@ -17,14 +19,13 @@ environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" - args: - chdir: "{{ results.stdout }}/ansible" - name: poll for test results - async_status: - jid: "{{async_test_results.ansible_job_id}}" + async_status: jid="{{async_test_results.ansible_job_id}}" register: test_results until: test_results.finished - retries: 360 - delay: 10 + retries: 120 + delay: 30 ignore_errors: true + +- debug: var=test_results From 3da312da9c1a92d5e8f47f3274338e4ef476b5a6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 19 Dec 2015 23:11:25 -0800 Subject: [PATCH 0218/1113] Switch from yum to package when installing sudo so that dnf is handled as well --- .../roles/ansible_test_deps/tasks/main.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index d9611497e91..832138527f9 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -4,14 +4,8 @@ when: ansible_os_family == 'Debian' - name: Install sudo - yum: name=sudo state=installed + package: name=sudo state=installed ignore_errors: true - when: ansible_os_family == 'RedHat' - -- name: Install sudo - apt: name=sudo state=installed - ignore_errors: true - when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From 6ec58bbd5f86bd4f2ca8aa6e7af78ee8ef28ee98 Mon Sep 17 00:00:00 2001 From: Branko Majic <branko@majic.rs> Date: Sun, 20 Dec 2015 14:19:20 +0100 Subject: [PATCH 0219/1113] Adding documentation for the 'dig' lookup (#13126). --- docsite/rst/playbooks_lookups.rst | 106 ++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 25560e284d4..3c2222c337b 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -240,6 +240,112 @@ If you're not using 2.0 yet, you can do something similar with the credstash too debug: msg="Poor man's credstash lookup! {{ lookup('pipe', 'credstash -r us-west-1 get my-other-password') }}" +.. _dns_lookup: + +The DNS Lookup (dig) +```````````````````` +.. versionadded:: 1.9.0 + +.. warning:: This lookup depends on the `dnspython <http://www.dnspython.org/>`_ + library. + +The ``dig`` lookup runs queries against DNS servers to retrieve DNS records for +a specific name (*FQDN* - fully qualified domain name). It is possible to lookup any DNS record in this manner. + +There is a couple of different syntaxes that can be used to specify what record +should be retrieved, and for which name. It is also possible to explicitly +specify the DNS server(s) to use for lookups. + +In its simplest form, the ``dig`` lookup plugin can be used to retrieve an IPv4 +address (DNS ``A`` record) associated with *FQDN*: + +.. note:: If you need to obtain the ``AAAA`` record (IPv6 address), you must + specify the record type explicitly. 
Syntax for specifying the record + type is described below. + +.. note:: The trailing dot in most of the examples listed is purely optional, + but is specified for completeness/correctness sake. + +:: + + - debug: msg="The IPv4 address for example.com. is {{ lookup('dig', 'example.com.')}}" + +In addition to (default) ``A`` record, it is also possible to specify a different +record type that should be queried. This can be done by either passing-in +additional parameter of format ``qtype=TYPE`` to the ``dig`` lookup, or by +appending ``/TYPE`` to the *FQDN* being queried. For example:: + + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com.', 'qtype=TXT') }}" + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com./TXT') }}" + +If multiple values are associated with the requested record, the results will be +returned as a comma-separated list. In such cases you may want to pass option +``wantlist=True`` to the plugin, which will result in the record values being +returned as a list over which you can iterate later on:: + + - debug: msg="One of the MX records for gmail.com. is {{ item }}" + with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}" + +In case of reverse DNS lookups (``PTR`` records), you can also use a convenience +syntax of format ``IP_ADDRESS/PTR``. The following three lines would produce the +same output:: + + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8/PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa./PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa.', 'qtype=PTR') }}" + +By default, the lookup will rely on system-wide configured DNS servers for +performing the query. It is also possible to explicitly specify DNS servers to +query using the ``@DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N`` notation. This +needs to be passed-in as an additional parameter to the lookup. For example:: + + - debug: msg="Querying 8.8.8.8 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@8.8.8.8') }}" + +In some cases the DNS records may hold a more complex data structure, or it may +be useful to obtain the results in a form of a dictionary for future +processing. The ``dig`` lookup supports parsing of a number of such records, +with the result being returned as a dictionary. This way it is possible to +easily access such nested data. This return format can be requested by +passing-in the ``flat=0`` option to the lookup. For example:: + + - debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}" + with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + +Take note that due to the way Ansible lookups work, you must pass the +``wantlist=True`` argument to the lookup, otherwise Ansible will report errors. + +Currently the dictionary results are supported for the following records: + +.. note:: *ALL* is not a record per-se, merely the listed fields are available + for any record results you retrieve in the form of a dictionary. 
+ +========== ============================================================================= +Record Fields +---------- ----------------------------------------------------------------------------- +*ALL* owner, ttl, type +A address +AAAA address +CNAME target +DNAME target +DLV algorithm, digest_type, key_tag, digest +DNSKEY flags, algorithm, protocol, key +DS algorithm, digest_type, key_tag, digest +HINFO cpu, os +LOC latitude, longitude, altitude, size, horizontal_precision, vertical_precision +MX preference, exchange +NAPTR order, preference, flags, service, regexp, replacement +NS target +NSEC3PARAM algorithm, flags, iterations, salt +PTR target +RP mbox, txt +SOA mname, rname, serial, refresh, retry, expire, minimum +SPF strings +SRV priority, weight, port, target +SSHFP algorithm, fp_type, fingerprint +TLSA usage, selector, mtype, cert +TXT strings +========== ============================================================================= + .. _more_lookups: More Lookups From b90506341ac77c4885efe754ae401b90b0f61a7f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 08:06:26 -0800 Subject: [PATCH 0220/1113] Fixes for tests that assumed yum as package manager for systems that have dnf --- .../roles/ec2_elb_instance_setup/tasks/main.yml | 7 ++++++- .../roles/setup_postgresql_db/tasks/main.yml | 8 ++++---- test/integration/roles/test_apt/tasks/main.yml | 1 - .../test_docker/tasks/docker-setup-rht.yml | 17 ++++++++--------- .../roles/test_unarchive/tasks/main.yml | 4 ++++ test/integration/roles/test_yum/tasks/main.yml | 2 ++ 6 files changed, 24 insertions(+), 15 deletions(-) diff --git a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml index 341392b00c7..79584893ed8 100644 --- a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml +++ b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml @@ -5,7 +5,12 @@ # install apache on the ec2 instances - name: install apache on new ec2 instances - yum: name=httpd + package: name=httpd + when: ansible_os_family == 'RedHat' + +- name: install apache on new ec2 instances + package: name=apache + when: ansible_os_family == 'Debian' - name: start and enable apache service: name=httpd state=started enabled=yes diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index fbcc9cab725..c25318a2adc 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -9,9 +9,9 @@ # Make sure we start fresh - name: remove rpm dependencies for postgresql test - yum: name={{ item }} state=absent + package: name={{ item }} state=absent with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: remove dpkg dependencies for postgresql test apt: name={{ item }} state=absent @@ -35,9 +35,9 @@ when: ansible_os_family == "Debian" - name: install rpm dependencies for postgresql test - yum: name={{ item }} state=latest + package: name={{ item }} state=latest with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: install dpkg dependencies for postgresql test apt: name={{ item }} state=latest diff --git a/test/integration/roles/test_apt/tasks/main.yml b/test/integration/roles/test_apt/tasks/main.yml index 8976087371d..552b543d2d3 100644 --- a/test/integration/roles/test_apt/tasks/main.yml 
+++ b/test/integration/roles/test_apt/tasks/main.yml @@ -1,4 +1,3 @@ -# test code for the yum module # (c) 2014, James Tanner <tanner.jc@gmail.com> # This file is part of Ansible diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml index 3ba234ecffc..c25821c3be0 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -1,18 +1,17 @@ -- name: Install docker packages (yum) - yum: +- name: Install docker packages (rht family) + package: state: present name: docker-io,docker-registry,python-docker-py,nginx -- name: Install netcat - yum: +- name: Install netcat (Fedora) + package: state: present name: nmap-ncat - # RHEL7 as well... - when: ansible_distribution == 'Fedora' + when: ansible_distribution == 'Fedora' or (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('>=', 7)) -- name: Install netcat - yum: +- name: Install netcat (RHEL) + package: state: present name: nc - when: ansible_distribution != 'Fedora' + when: ansible_distribution != 'Fedora' and (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('<', 7)) diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index c26d3aeb101..e4f438e5256 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -21,6 +21,10 @@ yum: name=zip state=latest when: ansible_pkg_mgr == 'yum' +- name: Ensure zip is present to create test archive (dnf) + dnf: name=zip state=latest + when: ansible_pkg_mgr == 'dnf' + - name: Ensure zip is present to create test archive (apt) apt: name=zip state=latest when: ansible_pkg_mgr == 'apt' diff --git a/test/integration/roles/test_yum/tasks/main.yml b/test/integration/roles/test_yum/tasks/main.yml index 5df887ae9f9..b17af6b465b 100644 --- a/test/integration/roles/test_yum/tasks/main.yml +++ b/test/integration/roles/test_yum/tasks/main.yml @@ -16,6 +16,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# Note: We install the yum package onto Fedora so that this will work on dnf systems +# We want to test that for people who don't want to upgrade their systems. 
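The test changes above swap hard-coded yum calls for the distribution-neutral package module, falling back to an explicit manager only where the manager itself matters. A condensed sketch of that pattern, assuming RedHat-family hosts that may run either yum or dnf:

    - name: Install a package with whatever manager the platform provides
      package:
        name: httpd
        state: present
      when: ansible_os_family == 'RedHat'

    - name: Be explicit only when the manager itself is the thing under test
      dnf:
        name: zip
        state: latest
      when: ansible_pkg_mgr == 'dnf'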
- include: 'yum.yml' when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] From 5fef2c429763db8d088a20c97320936ee06e7fc8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 09:11:53 -0800 Subject: [PATCH 0221/1113] Try updating the centos7 image to a newer version (trying to resolve issue being unable to connect to some webservers) --- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 27c4ae51b0d..f1bd26b7ead 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -19,7 +19,7 @@ platform: "centos-6.5-x86_64" - distribution: "CentOS" version: "7" - image: "ami-96a818fe" + image: "ami-61bbf104" ssh_user: "centos" platform: "centos-7-x86_64" - distribution: "Fedora" From 6ae04c1e4f698629610030a74f5bb5fc501f5a1e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 20 Dec 2015 12:37:24 -0500 Subject: [PATCH 0222/1113] Fix logic in PlayIterator when inserting tasks during rescue/always Because the fail_state is potentially non-zero in these block sections, the prior logic led to included tasks not being inserted at all. Related issue: #13605 --- lib/ansible/executor/play_iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 795eed2a8c1..534f216c30a 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -397,7 +397,7 @@ class PlayIterator: def _insert_tasks_into_state(self, state, task_list): # if we've failed at all, or if the task list is empty, just return the current state - if state.fail_state != self.FAILED_NONE or not task_list: + if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list: return state if state.run_state == self.ITERATING_TASKS: From 8d7892cc7b7a95c4efda003c8b187d1bc4875a5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 10:13:33 -0800 Subject: [PATCH 0223/1113] Done troubleshooting Revert "Troubleshooting has reduced us to this" This reverts commit 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc. 
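The PlayIterator fix above matters for plays that pull extra tasks in from inside rescue or always sections; before the change those included tasks could be dropped once the block had already recorded a failure. A sketch of the affected shape (file and command names are illustrative):

    - block:
        - name: Step that may fail
          command: /usr/local/bin/might-fail     # hypothetical command
      rescue:
        - include: recover.yml                   # tasks included here must still be scheduled
      always:
        - include: cleanup.yml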
--- test/integration/roles/test_get_url/tasks/main.yml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 54debc06d10..cbf3b345f18 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,22 +96,12 @@ register: get_url_result ignore_errors: True -- name: TROUBLESHOOTING - shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html - register: trouble - ignore_errors: True - when: "{{ python_has_ssl_context }}" - -- debug: var=trouble - when: "{{ python_has_ssl_context }}" - -- debug: var=get_url_result - when: "{{ python_has_ssl_context }}" - - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" +# If distros start backporting SNI, can make a new conditional based on whether this works: +# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From 3792a586b51ce598ab71bfab004a4bd97f004101 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 11:33:42 -0800 Subject: [PATCH 0224/1113] Since the velox test server seems to be dropping using iptables to drop requests from aws, test via a different website instead --- .../roles/test_get_url/tasks/main.yml | 45 +++++++++++++++---- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f18..a0ff3797a87 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -85,23 +85,51 @@ - "result.changed == true" - "stat_result.stat.exists == true" -# SNI Tests -# SNI is only built into the stdlib from python-2.7.9 onwards +# At the moment, AWS can't make an https request to velox.ch... connection +# timed out. So we'll use a different test until/unless the problem is resolved +## SNI Tests +## SNI is only built into the stdlib from python-2.7.9 onwards +#- name: Test that SNI works +# get_url: +# # A test site that returns a page with information on what SNI information +# # the client sent. A failure would have the string: did not send a TLS server name indication extension +# url: 'https://foo.sni.velox.ch/' +# dest: "{{ output_dir }}/sni.html" +# register: get_url_result +# ignore_errors: True +# +#- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +# register: data_result +# when: "{{ python_has_ssl_context }}" +# +#- debug: var=get_url_result +#- name: Assert that SNI works with this python version +# assert: +# that: +# - 'data_result.rc == 0' +# - '"failed" not in get_url_result' +# when: "{{ python_has_ssl_context }}" +# +## If the client doesn't support SNI then get_url should have failed with a certificate mismatch +#- name: Assert that hostname verification failed because SNI is not supported on this version of python +# assert: +# that: +# - 'get_url_result["failed"]' +# when: "{{ not python_has_ssl_context }}" + +# These tests are just side effects of how the site is hosted. It's not +# specifically a test site. 
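The SNI tests in these patches key their assertions off a python_has_ssl_context flag, since SNI support only ships with the stdlib from Python 2.7.9 onwards. One way such a flag could be derived (an assumption for illustration; the test role may set it differently):

    - name: Check whether this Python exposes ssl.SSLContext (needed for SNI)
      command: "{{ ansible_python_interpreter | default('python') }} -c 'from ssl import SSLContext'"
      register: ssl_context_check
      ignore_errors: True

    - set_fact:
        python_has_ssl_context: "{{ ssl_context_check.rc == 0 }}"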
So the tests may break due to the hosting changing - name: Test that SNI works get_url: - # A test site that returns a page with information on what SNI information - # the client sent. A failure would have the string: did not send a TLS server name indication extension - url: 'https://foo.sni.velox.ch/' + url: 'https://www.mnot.net/blog/2014/05/09/if_you_can_read_this_youre_sniing' dest: "{{ output_dir }}/sni.html" register: get_url_result ignore_errors: True -- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +- command: "grep '<h2>If You Can Read This, You're SNIing</h2>' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: @@ -116,3 +144,4 @@ that: - 'get_url_result["failed"]' when: "{{ not python_has_ssl_context }}" +# End hacky SNI test section From 21ca0ce1ce12eb4e487d479abdc355972d2c2309 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 11:46:49 -0800 Subject: [PATCH 0225/1113] Fix test playbook syntax --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index a0ff3797a87..630287c9871 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '<h2>If You Can Read This, You're SNIing</h2>' {{ output_dir}}/sni.html" +- command: "grep '<h2>If You Can Read This, You\\'re SNIing</h2>' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From 6963955cb4a607c8548669136cb266c25d9f9ceb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Dec 2015 11:51:32 -0800 Subject: [PATCH 0226/1113] And change the task a little more since different shlex versions are handling the quotes differently --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 630287c9871..9ed0549ec47 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '<h2>If You Can Read This, You\\'re SNIing</h2>' {{ output_dir}}/sni.html" +- command: "grep '<h2>If You Can Read This, You.re SNIing</h2>' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From 8b5e5538285f03c360807fd1e09c00a77d52bd94 Mon Sep 17 00:00:00 2001 From: Rene Moser <rene.moser@swisstxt.ch> Date: Mon, 16 Nov 2015 18:38:27 +0100 Subject: [PATCH 0227/1113] cloudstack: add tests for cs_volume --- test/integration/cloudstack.yml | 1 + .../roles/test_cs_volume/defaults/main.yml | 6 + .../roles/test_cs_volume/meta/main.yml | 3 + .../roles/test_cs_volume/tasks/main.yml | 183 ++++++++++++++++++ 4 files changed, 193 insertions(+) create mode 100644 test/integration/roles/test_cs_volume/defaults/main.yml create mode 100644 test/integration/roles/test_cs_volume/meta/main.yml create mode 100644 
test/integration/roles/test_cs_volume/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 93ba7876d8c..3ad4ed08349 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -22,3 +22,4 @@ - { role: test_cs_account, tags: test_cs_account } - { role: test_cs_firewall, tags: test_cs_firewall } - { role: test_cs_loadbalancer_rule, tags: test_cs_loadbalancer_rule } + - { role: test_cs_volume, tags: test_cs_volume } diff --git a/test/integration/roles/test_cs_volume/defaults/main.yml b/test/integration/roles/test_cs_volume/defaults/main.yml new file mode 100644 index 00000000000..546469f33fc --- /dev/null +++ b/test/integration/roles/test_cs_volume/defaults/main.yml @@ -0,0 +1,6 @@ +--- +test_cs_instance_1: "{{ cs_resource_prefix }}-vm1" +test_cs_instance_2: "{{ cs_resource_prefix }}-vm2" +test_cs_instance_template: CentOS 5.3(64-bit) no GUI (Simulator) +test_cs_instance_offering_1: Small Instance +test_cs_disk_offering_1: Small diff --git a/test/integration/roles/test_cs_volume/meta/main.yml b/test/integration/roles/test_cs_volume/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_volume/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_volume/tasks/main.yml b/test/integration/roles/test_cs_volume/tasks/main.yml new file mode 100644 index 00000000000..fa1f1026028 --- /dev/null +++ b/test/integration/roles/test_cs_volume/tasks/main.yml @@ -0,0 +1,183 @@ +--- +- name: setup + cs_volume: name={{ cs_resource_prefix }}_vol state=absent + register: vol +- name: verify setup + assert: + that: + - vol|success + +- name: setup instance 1 + cs_instance: + name: "{{ test_cs_instance_1 }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + register: instance +- name: verify create instance + assert: + that: + - instance|success + +- name: setup instance 2 + cs_instance: + name: "{{ test_cs_instance_2 }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + register: instance +- name: verify create instance + assert: + that: + - instance|success + +- name: test fail if missing name + action: cs_volume + register: vol + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - vol|failed + - "vol.msg == 'missing required arguments: name'" + +- name: test create volume + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + disk_offering: "{{ test_cs_disk_offering_1 }}" + register: vol +- name: verify results test create volume + assert: + that: + - vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + +- name: test create volume idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + disk_offering: "{{ test_cs_disk_offering_1 }}" + register: vol +- name: verify results test create volume idempotence + assert: + that: + - not vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + +- name: test attach volume + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + vm: "{{ test_cs_instance_1 }}" + state: attached + register: vol +- name: verify results test attach volume + assert: + that: + - vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.vm == "{{ test_cs_instance_1 }}" + - vol.attached is defined + +- name: test attach volume idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + vm: "{{ test_cs_instance_1 }}" + state: 
attached + register: vol +- name: verify results test attach volume idempotence + assert: + that: + - not vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.vm == "{{ test_cs_instance_1 }}" + - vol.attached is defined + +- name: test attach attached volume to another vm + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + vm: "{{ test_cs_instance_2 }}" + state: attached + register: vol +- name: verify results test attach attached volume to another vm + assert: + that: + - vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.vm == "{{ test_cs_instance_2 }}" + - vol.attached is defined + +- name: test attach attached volume to another vm idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + vm: "{{ test_cs_instance_2 }}" + state: attached + register: vol +- name: verify results test attach attached volume to another vm idempotence + assert: + that: + - not vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.vm == "{{ test_cs_instance_2 }}" + - vol.attached is defined + +- name: test detach volume + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + state: detached + register: vol +- name: verify results test detach volume + assert: + that: + - vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.attached is undefined + +- name: test detach volume idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + state: detached + register: vol +- name: verify results test detach volume idempotence + assert: + that: + - not vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + - vol.attached is undefined + +- name: test delete volume + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + state: absent + register: vol +- name: verify results test create volume + assert: + that: + - vol|changed + - vol.name == "{{ cs_resource_prefix }}_vol" + +- name: test delete volume idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + state: absent + register: vol +- name: verify results test delete volume idempotence + assert: + that: + - not vol|changed + +- name: cleanup instance 1 + cs_instance: + name: "{{ test_cs_instance_1 }}" + state: absent + register: instance +- name: verify create instance + assert: + that: + - instance|success + +- name: cleanup instance 2 + cs_instance: + name: "{{ test_cs_instance_2 }}" + state: absent + register: instance +- name: verify create instance + assert: + that: + - instance|success From b0525da8c879faf837fba026c908bf0521e7629f Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Sat, 5 Dec 2015 15:19:43 +0100 Subject: [PATCH 0228/1113] cloudstack: cs_volume: add tests for volume resize See https://github.com/ansible/ansible-modules-extras/pull/1333 --- .../roles/test_cs_volume/defaults/main.yml | 2 +- .../roles/test_cs_volume/tasks/main.yml | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_cs_volume/defaults/main.yml b/test/integration/roles/test_cs_volume/defaults/main.yml index 546469f33fc..311a99bbe82 100644 --- a/test/integration/roles/test_cs_volume/defaults/main.yml +++ b/test/integration/roles/test_cs_volume/defaults/main.yml @@ -3,4 +3,4 @@ test_cs_instance_1: "{{ cs_resource_prefix }}-vm1" test_cs_instance_2: "{{ cs_resource_prefix }}-vm2" test_cs_instance_template: CentOS 5.3(64-bit) no GUI (Simulator) test_cs_instance_offering_1: Small Instance -test_cs_disk_offering_1: Small +test_cs_disk_offering_1: Custom diff --git a/test/integration/roles/test_cs_volume/tasks/main.yml 
b/test/integration/roles/test_cs_volume/tasks/main.yml index fa1f1026028..ae57039cee8 100644 --- a/test/integration/roles/test_cs_volume/tasks/main.yml +++ b/test/integration/roles/test_cs_volume/tasks/main.yml @@ -43,22 +43,54 @@ cs_volume: name: "{{ cs_resource_prefix }}_vol" disk_offering: "{{ test_cs_disk_offering_1 }}" + size: 20 register: vol - name: verify results test create volume assert: that: - vol|changed + - vol.size == 20 * 1024 ** 3 - vol.name == "{{ cs_resource_prefix }}_vol" - name: test create volume idempotence cs_volume: name: "{{ cs_resource_prefix }}_vol" disk_offering: "{{ test_cs_disk_offering_1 }}" + size: 20 register: vol - name: verify results test create volume idempotence assert: that: - not vol|changed + - vol.size == 20 * 1024 ** 3 + - vol.name == "{{ cs_resource_prefix }}_vol" + +- name: test shrink volume + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + disk_offering: "{{ test_cs_disk_offering_1 }}" + size: 10 + shrink_ok: yes + register: vol +- name: verify results test create volume + assert: + that: + - vol|changed + - vol.size == 10 * 1024 ** 3 + - vol.name == "{{ cs_resource_prefix }}_vol" + +- name: test shrink volume idempotence + cs_volume: + name: "{{ cs_resource_prefix }}_vol" + disk_offering: "{{ test_cs_disk_offering_1 }}" + size: 10 + shrink_ok: yes + register: vol +- name: verify results test create volume + assert: + that: + - not vol|changed + - vol.size == 10 * 1024 ** 3 - vol.name == "{{ cs_resource_prefix }}_vol" - name: test attach volume From b85b92ecdd03429fd84d384a495fbb5894da9ab0 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Mon, 14 Dec 2015 14:23:44 +0100 Subject: [PATCH 0229/1113] cloudstack: test_cs_instance: more integration tests cloudstack: extend test_cs_instance addressing recovering cloudstack: test_cs_instance: add tests for using display_name as indentifier. 
--- .../roles/test_cs_instance/tasks/absent.yml | 20 ++ .../tasks/absent_display_name.yml | 43 +++++ .../roles/test_cs_instance/tasks/cleanup.yml | 6 - .../roles/test_cs_instance/tasks/main.yml | 5 + .../roles/test_cs_instance/tasks/present.yml | 37 +++- .../tasks/present_display_name.yml | 176 ++++++++++++++++++ .../roles/test_cs_instance/tasks/setup.yml | 8 - 7 files changed, 272 insertions(+), 23 deletions(-) create mode 100644 test/integration/roles/test_cs_instance/tasks/absent_display_name.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/present_display_name.yml diff --git a/test/integration/roles/test_cs_instance/tasks/absent.yml b/test/integration/roles/test_cs_instance/tasks/absent.yml index bafb3ec9e76..eeab47a61d7 100644 --- a/test/integration/roles/test_cs_instance/tasks/absent.yml +++ b/test/integration/roles/test_cs_instance/tasks/absent.yml @@ -21,3 +21,23 @@ that: - instance|success - not instance|changed + +- name: test recover to stopped state and update a deleted instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml new file mode 100644 index 00000000000..35fa6dff34f --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml @@ -0,0 +1,43 @@ +--- +- name: test destroy instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Destroyed" + +- name: test destroy instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + +- name: test recover to stopped state and update a deleted instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/cleanup.yml b/test/integration/roles/test_cs_instance/tasks/cleanup.yml index 63192dbd608..e6b6550dfa1 100644 --- a/test/integration/roles/test_cs_instance/tasks/cleanup.yml +++ b/test/integration/roles/test_cs_instance/tasks/cleanup.yml @@ -28,9 +28,3 
@@ assert: that: - sg|success - -# force expunge, only works with admin permissions -- cs_instance: - name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - state: expunged - failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/main.yml b/test/integration/roles/test_cs_instance/tasks/main.yml index d1a67e17810..d6475a47664 100644 --- a/test/integration/roles/test_cs_instance/tasks/main.yml +++ b/test/integration/roles/test_cs_instance/tasks/main.yml @@ -4,3 +4,8 @@ - include: tags.yml - include: absent.yml - include: cleanup.yml + +- include: setup.yml +- include: present_display_name.yml +- include: absent_display_name.yml +- include: cleanup.yml diff --git a/test/integration/roles/test_cs_instance/tasks/present.yml b/test/integration/roles/test_cs_instance/tasks/present.yml index 10242a57fd2..ad3d391ef9c 100644 --- a/test/integration/roles/test_cs_instance/tasks/present.yml +++ b/test/integration/roles/test_cs_instance/tasks/present.yml @@ -1,4 +1,12 @@ --- +- name: setup instance to be absent + cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance to be absent + assert: + that: + - instance|success + - name: test create instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -21,7 +29,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test create instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -44,7 +51,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test running instance not updated cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -60,7 +66,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" - - name: test stopping instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -76,7 +81,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Stopped" - - name: test stopping instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -89,7 +93,6 @@ - not instance|changed - instance.state == "Stopped" - - name: test updating stopped instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -106,7 +109,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Stopped" - - name: test starting instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -122,7 +124,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - - name: test starting instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -133,6 +134,9 @@ that: - instance|success - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - name: test force update running instance @@ -147,7 +151,7 @@ - instance|success - instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ 
test_cs_instance_offering_1 }}" - instance.state == "Running" @@ -163,6 +167,21 @@ - instance|success - not instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" + +- name: test restore instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/present_display_name.yml b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml new file mode 100644 index 00000000000..c1882149d9d --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml @@ -0,0 +1,176 @@ +--- +- name: setup instance with display_name to be absent + cs_instance: display_name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance with display_name to be absent + assert: + that: + - instance|success + +- name: test create instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test create instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test running instance with display_name not updated + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify running instance with display_name not updated + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix 
}}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test stopping instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Stopped" + +- name: test stopping instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.state == "Stopped" + +- name: test updating stopped instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify updating stopped instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Stopped" + +- name: test starting instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test starting instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test force update running instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test force update running instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test restore 
instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/setup.yml b/test/integration/roles/test_cs_instance/tasks/setup.yml index 32f3ff13e24..0039ce8f1be 100644 --- a/test/integration/roles/test_cs_instance/tasks/setup.yml +++ b/test/integration/roles/test_cs_instance/tasks/setup.yml @@ -22,11 +22,3 @@ assert: that: - sg|success - -- name: setup instance to be absent - cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent - register: instance -- name: verify instance to be absent - assert: - that: - - instance|success From 3a57d9472c6788ce6fbb700108fbc776527fc3df Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 20 Dec 2015 17:55:39 -0500 Subject: [PATCH 0230/1113] Save output of integration test results to files we can archive --- .../roles/run_integration/tasks/main.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2d01999dbfd..f67f088246c 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -28,4 +28,14 @@ delay: 30 ignore_errors: true -- debug: var=test_results +- name: save stdout test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stdout_results.txt" + content: "{{test_results.stdout}}" + +- name: save stderr test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stderr_results.txt" + content: "{{test_results.stderr}}" From 54455a06e55756b31493fd25b1871146c8fe6ab2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 20 Dec 2015 21:32:37 -0500 Subject: [PATCH 0231/1113] Disable docker test for Fedora, due to broken packaging --- test/integration/destructive.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 626124d14f1..3e8cca385e6 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -17,5 +17,5 @@ - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} - - { role: test_docker, tags: test_docker} + - { role: test_docker, tags: test_docker, when: ansible_distribution != "Fedora" } - { role: test_zypper, tags: test_zypper} From a4674906c60da6035345c2bbe89983b5a6e3b69d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Mon, 21 Dec 2015 13:01:58 -0500 Subject: [PATCH 0232/1113] Merge role params into variables separately from other variables Fixes #13617 --- lib/ansible/playbook/role/__init__.py | 6 ++++++ lib/ansible/vars/__init__.py | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 
f308954f528..ce82573dc03 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -265,6 +265,12 @@ class Role(Base, Become, Conditional, Taggable): inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars + def get_role_params(self): + params = {} + for dep in self.get_all_dependencies(): + params = combine_vars(params, dep._role_params) + return params + def get_vars(self, dep_chain=[], include_params=True): all_vars = self.get_inherited_vars(dep_chain, include_params=include_params) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 1184ec50492..699333a5896 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -308,6 +308,7 @@ class VariableManager: if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): + all_vars = combine_vars(all_vars, role.get_role_params()) all_vars = combine_vars(all_vars, role.get_vars(include_params=False)) if task: From 593d80c63d408012550850eb06d85387588cee3b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 21 Dec 2015 13:14:51 -0500 Subject: [PATCH 0233/1113] role search path clarified --- docsite/rst/playbooks_roles.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index c6c01db5d48..2e1173acda9 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -191,11 +191,8 @@ This designates the following behaviors, for each role 'x': - If roles/x/handlers/main.yml exists, handlers listed therein will be added to the play - If roles/x/vars/main.yml exists, variables listed therein will be added to the play - If roles/x/meta/main.yml exists, any role dependencies listed therein will be added to the list of roles (1.3 and later) -- Any copy tasks can reference files in roles/x/files/ without having to path them relatively or absolutely -- Any script tasks can reference scripts in roles/x/files/ without having to path them relatively or absolutely -- Any template tasks can reference files in roles/x/templates/ without having to path them relatively or absolutely -- Any include tasks can reference files in roles/x/tasks/ without having to path them relatively or absolutely - +- Any copy, script, template or include tasks (in the role) can reference files in roles/x/files/ without having to path them relatively or absolutely + In Ansible 1.4 and later you can configure a roles_path to search for roles. Use this to check all of your common roles out to one location, and share them easily between multiple playbook projects. See :doc:`intro_configuration` for details about how to set this up in ansible.cfg. From 75e94e0cba538c9ed532374b219c45e91fd89db8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 21 Dec 2015 13:06:48 -0500 Subject: [PATCH 0234/1113] allow for non standard hostnames * Changed parse_addresses to throw exceptions instead of passing None * Switched callers to trap and pass through the original values. 
* Added very verbose notice * Look at deprecating this and possibly validate at plugin instead fixes #13608 --- lib/ansible/inventory/__init__.py | 21 ++++++++++++--------- lib/ansible/inventory/ini.py | 11 +++++++---- lib/ansible/parsing/utils/addresses.py | 22 +++++++++++----------- lib/ansible/plugins/action/add_host.py | 10 +++++++--- test/units/parsing/test_addresses.py | 14 ++++++++++++-- 5 files changed, 49 insertions(+), 29 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 95e193f381a..095118e50eb 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -109,7 +109,12 @@ class Inventory(object): pass elif isinstance(host_list, list): for h in host_list: - (host, port) = parse_address(h, allow_ranges=False) + try: + (host, port) = parse_address(h, allow_ranges=False) + except AnsibleError as e: + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + host = h + port = None all.add_host(Host(host, port)) elif self._loader.path_exists(host_list): #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory pllugins' @@ -228,15 +233,13 @@ class Inventory(object): # If it doesn't, it could still be a single pattern. This accounts for # non-separator uses of colons: IPv6 addresses and [x:y] host ranges. else: - (base, port) = parse_address(pattern, allow_ranges=True) - if base: + try: + (base, port) = parse_address(pattern, allow_ranges=True) patterns = [pattern] - - # The only other case we accept is a ':'-separated list of patterns. - # This mishandles IPv6 addresses, and is retained only for backwards - # compatibility. - - else: + except: + # The only other case we accept is a ':'-separated list of patterns. + # This mishandles IPv6 addresses, and is retained only for backwards + # compatibility. patterns = re.findall( r'''(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 537fde1ef9e..9224ef2d23d 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,7 +23,7 @@ import ast import re from ansible import constants as C -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleParserError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range @@ -264,9 +264,12 @@ class InventoryParser(object): # Can the given hostpattern be parsed as a host with an optional port # specification? - (pattern, port) = parse_address(hostpattern, allow_ranges=True) - if not pattern: - self._raise_error("Can't parse '%s' as host[:port]" % hostpattern) + try: + (pattern, port) = parse_address(hostpattern, allow_ranges=True) + except: + # not a recognizable host pattern + pattern = hostpattern + port = None # Once we have separated the pattern, we expand it into list of one or # more hostnames, depending on whether it contains any [x:y] ranges. 
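(Editor's illustration, not part of the patch: after this change ``parse_address`` raises instead of returning ``(None, None)``, so callers that want the old "leave unparsable names unchanged" behaviour wrap it as the inventory and ``add_host`` callers below do. The helper name here is hypothetical.)::

    from ansible.parsing.utils.addresses import parse_address

    def split_host_port(value):
        # Mirrors the fallback pattern used by the callers changed in this patch.
        try:
            return parse_address(value, allow_ranges=False)
        except Exception:
            # parse_address now raises AnsibleError/AnsibleParserError rather than
            # returning (None, None); keep the original value and assume no port.
            return value, None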
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py index 387f05c627f..ebfd850ac6a 100644 --- a/lib/ansible/parsing/utils/addresses.py +++ b/lib/ansible/parsing/utils/addresses.py @@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re +from ansible.errors import AnsibleParserError, AnsibleError # Components that match a numeric or alphanumeric begin:end or begin:end:step # range expression inside square brackets. @@ -162,6 +163,7 @@ patterns = { $ '''.format(label=label), re.X|re.I|re.UNICODE ), + } def parse_address(address, allow_ranges=False): @@ -183,8 +185,8 @@ def parse_address(address, allow_ranges=False): # First, we extract the port number if one is specified. port = None - for type in ['bracketed_hostport', 'hostport']: - m = patterns[type].match(address) + for matching in ['bracketed_hostport', 'hostport']: + m = patterns[matching].match(address) if m: (address, port) = m.groups() port = int(port) @@ -194,22 +196,20 @@ def parse_address(address, allow_ranges=False): # numeric ranges, or a hostname with alphanumeric ranges. host = None - for type in ['ipv4', 'ipv6', 'hostname']: - m = patterns[type].match(address) + for matching in ['ipv4', 'ipv6', 'hostname']: + m = patterns[matching].match(address) if m: host = address continue # If it isn't any of the above, we don't understand it. - if not host: - return (None, None) - - # If we get to this point, we know that any included ranges are valid. If - # the caller is prepared to handle them, all is well. Otherwise we treat - # it as a parse failure. + raise AnsibleError("Not a valid network hostname: %s" % address) + # If we get to this point, we know that any included ranges are valid. + # If the caller is prepared to handle them, all is well. + # Otherwise we treat it as a parse failure. 
if not allow_ranges and '[' in host: - return (None, None) + raise AnsibleParserError("Detected range in host but was asked to ignore ranges") return (host, port) diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py index 4bf43f14009..b3aec20437e 100644 --- a/lib/ansible/plugins/action/add_host.py +++ b/lib/ansible/plugins/action/add_host.py @@ -53,9 +53,13 @@ class ActionModule(ActionBase): new_name = self._task.args.get('name', self._task.args.get('hostname', None)) display.vv("creating host via 'add_host': hostname=%s" % new_name) - name, port = parse_address(new_name, allow_ranges=False) - if not name: - raise AnsibleError("Invalid inventory hostname: %s" % new_name) + try: + name, port = parse_address(new_name, allow_ranges=False) + except: + # not a parsable hostname, but might still be usable + name = new_name + port = None + if port: self._task.args['ansible_ssh_port'] = port diff --git a/test/units/parsing/test_addresses.py b/test/units/parsing/test_addresses.py index 870cbb0a14a..a688d0253bd 100644 --- a/test/units/parsing/test_addresses.py +++ b/test/units/parsing/test_addresses.py @@ -71,7 +71,12 @@ class TestParseAddress(unittest.TestCase): for t in self.tests: test = self.tests[t] - (host, port) = parse_address(t) + try: + (host, port) = parse_address(t) + except: + host = None + port = None + assert host == test[0] assert port == test[1] @@ -79,6 +84,11 @@ class TestParseAddress(unittest.TestCase): for t in self.range_tests: test = self.range_tests[t] - (host, port) = parse_address(t, allow_ranges=True) + try: + (host, port) = parse_address(t, allow_ranges=True) + except: + host = None + port = None + assert host == test[0] assert port == test[1] From 08b580decce79deac3c7c2d828d6a8ef9dd6e70c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Dec 2015 14:09:02 -0500 Subject: [PATCH 0235/1113] Parallelize make command for integration test runner Also adds a new var, used by the prepare_tests role, to prevent it from deleting the temp test directory at the start of each play to avoid any potential race conditions --- test/integration/roles/prepare_tests/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/prepare_tests/tasks/main.yml b/test/integration/roles/prepare_tests/tasks/main.yml index 3641880baa1..7983ea52361 100644 --- a/test/integration/roles/prepare_tests/tasks/main.yml +++ b/test/integration/roles/prepare_tests/tasks/main.yml @@ -22,6 +22,7 @@ always_run: True tags: - prepare + when: clean_working_dir|default("yes")|bool - name: create the test directory file: name={{output_dir}} state=directory diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index f67f088246c..8a306a8ada4 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -9,7 +9,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 6d6822e66e43658c01b68bab2ed897e0ef31c784 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Dec 2015 14:37:17 -0500 Subject: [PATCH 0236/1113] Kick up the integration runner test image size --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d95708..55619776d90 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.medium' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From 45afa642c3a69d209fefd7debfb38df9d8b757fd Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Dec 2015 15:48:58 -0500 Subject: [PATCH 0237/1113] Integration test runner tweaks --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- .../roles/run_integration/tasks/main.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 55619776d90..8a48f0ce6e2 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.large' + instance_type: 'm3.xlarge' image: '{{ item.image }}' wait: true region: 'us-east-1' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 8a306a8ada4..6b37d85c2e7 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -3,13 +3,14 @@ synchronize: src: "{{ sync_dir }}/" dest: "~/ansible" + no_log: true - name: Get ansible source dir sudo: false shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 @@ -27,6 +28,7 @@ retries: 120 delay: 30 ignore_errors: true + no_log: true - name: save stdout test results for each host local_action: copy From 8119ea37afe5e94a1d98cec9fe7ae760b10a9adc Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Dec 2015 15:55:16 -0500 Subject: [PATCH 0238/1113] Dropping instance size back down since we're not doing parallel builds --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 8a48f0ce6e2..55619776d90 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.xlarge' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From d22bbbf52c08e03b63d6045768f3000531f875e9 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Dec 2015 16:11:53 -0500 Subject: [PATCH 0239/1113] Actually disable parallel makes for integration runner --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 6b37d85c2e7..a833c96558d 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,7 +10,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" +- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 0c013f592a31c06baac7aadf27d23598f6abe931 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 21 Dec 2015 13:52:41 -0800 Subject: [PATCH 0240/1113] Transform the command we pass to subprocess into a byte string in _low_level-exec_command --- lib/ansible/plugins/action/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d66..e88a55a15cc 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,7 +487,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - + # We may need to revisit this later. + cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd From 0f4d1eb051ce4aa6863f6ec86b00b43ccc277c5a Mon Sep 17 00:00:00 2001 From: Andrew Gaffney <andrew@agaffney.org> Date: Fri, 18 Dec 2015 01:56:15 +0000 Subject: [PATCH 0241/1113] Add 'filtered' stdout callback plugin This plugin filters output for any task that is 'ok' or 'skipped'. 
It works by subclassing the 'default' stdout callback plugin and overriding certain functions. It will suppress display of the task banner until there is a 'changed' or 'failed' result or an unreachable host. --- lib/ansible/plugins/callback/filtered.py | 76 ++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 lib/ansible/plugins/callback/filtered.py diff --git a/lib/ansible/plugins/callback/filtered.py b/lib/ansible/plugins/callback/filtered.py new file mode 100644 index 00000000000..094c37ed985 --- /dev/null +++ b/lib/ansible/plugins/callback/filtered.py @@ -0,0 +1,76 @@ +# (c) 2015, Andrew Gaffney <andrew@agaffney.org> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default + +class CallbackModule(CallbackModule_default): + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'filtered' + + def __init__(self): + self.super_ref = super(CallbackModule, self) + self.super_ref.__init__() + self.last_task = None + self.shown_title = False + + def v2_playbook_on_task_start(self, task, is_conditional): + self.last_task = task + self.shown_title = False + + def display_task_banner(self): + if not self.shown_title: + self.super_ref.v2_playbook_on_task_start(self.last_task, None) + self.shown_title = True + + def v2_runner_on_failed(self, result, ignore_errors=False): + self.display_task_banner() + self.super_ref.v2_runner_on_failed(result, ignore_errors) + + def v2_runner_on_ok(self, result): + if result._result.get('changed', False): + self.display_task_banner() + self.super_ref.v2_runner_on_ok(result) + else: + pass + + def v2_runner_on_unreachable(self, result): + self.display_task_banner() + self.super_ref.v2_runner_on_unreachable(result) + + def v2_runner_on_skipped(self, result): + pass + + def v2_playbook_on_include(self, included_file): + pass + + def v2_playbook_item_on_ok(self, result): + self.display_task_banner() + self.super_ref.v2_playbook_item_on_ok(result) + + def v2_playbook_item_on_skipped(self, result): + pass + + def v2_playbook_item_on_failed(self, result): + self.display_task_banner() + self.super_ref.v2_playbook_item_on_failed(result) + From bbdfaf052209242fbd262860aeda81e59d694243 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 22 Dec 2015 00:24:35 -0500 Subject: [PATCH 0242/1113] move hostvars.vars to vars this fixes duplication under hostvars and exposes all vars in the vars dict which makes dynamic reference possible on 'non hostvars' --- lib/ansible/vars/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 699333a5896..4135ff17687 100644 --- a/lib/ansible/vars/__init__.py +++ 
b/lib/ansible/vars/__init__.py @@ -259,8 +259,6 @@ class VariableManager: except KeyError: pass - all_vars['vars'] = all_vars.copy() - if play: all_vars = combine_vars(all_vars, play.get_vars()) @@ -343,6 +341,8 @@ class VariableManager: all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars) #VARIABLE_CACHE[cache_entry] = all_vars + if task or play: + all_vars['vars'] = all_vars.copy() debug("done with get_vars()") return all_vars From c60749c9222c8139042a0f4280d6622b209de550 Mon Sep 17 00:00:00 2001 From: Monty Taylor <mordred@inaugust.com> Date: Tue, 22 Dec 2015 09:14:12 -0600 Subject: [PATCH 0243/1113] Also convert ints to bool for type=bool --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd61..8a135b300f1 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1274,7 +1274,7 @@ class AnsibleModule(object): if isinstance(value, bool): return value - if isinstance(value, basestring): + if isinstance(value, basestring) or isinstance(value, int): return self.boolean(value) raise TypeError('%s cannot be converted to a bool' % type(value)) From b310d0ce76c05bb7a7a47aa7b7537b9adc916171 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 07:22:44 -0800 Subject: [PATCH 0244/1113] Update the developing doc to modern method of specifying bool argspec values --- docsite/rst/developing_modules.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index fde4b5704b6..39bfd9e3d9c 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -247,7 +247,7 @@ And instantiating the module class like:: argument_spec = dict( state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), - enabled = dict(required=True, choices=BOOLEANS), + enabled = dict(required=True, type='bool'), something = dict(aliases=['whatever']) ) ) @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "choices=BOOLEANS" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. 
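(Editor's illustration, not part of the patches: a minimal module skeleton using the ``type='bool'`` argspec form referenced in PATCH 0244 above; the module and option names are hypothetical.)::

    #!/usr/bin/python
    # Sketch only: with type='bool', values such as 'yes', 'no', 'true', 'false',
    # 0 or 1 are coerced to a Python bool before main() sees them (see PATCH 0243).
    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                name=dict(required=True),
                state=dict(default='present', choices=['present', 'absent']),
                enabled=dict(required=True, type='bool'),
            ),
        )
        module.exit_json(changed=False, enabled=module.params['enabled'])

    if __name__ == '__main__':
        main()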
From b33f72636a3b7f3a256185afde1aae3d9703235e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 07:25:50 -0800 Subject: [PATCH 0245/1113] Also remove the bool casting function info (transparent to module writer now) --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 39bfd9e3d9c..141f81bd08b 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'". * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. From c4da5840b5e38aea1740e68f7100256c93dfbb17 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 08:22:02 -0800 Subject: [PATCH 0246/1113] Convert to bytes later so that make_become_command can jsut operate on text type. --- lib/ansible/plugins/action/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e88a55a15cc..765ba663164 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,8 +487,6 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -505,7 +503,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From b22d998d1d9acbda6f458ea99d7e5266d69e035c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Tue, 22 Dec 2015 16:30:29 +0100 Subject: [PATCH 0247/1113] Fix make tests-py3 on devel. Fix for https://github.com/ansible/ansible/issues/13638. 
--- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 0e47b6a5381..dcd04375959 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ class TestActionBase(unittest.TestCase): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value='CMD') + play_context.make_become_cmd = Mock(return_value=b'CMD') - action_base._low_level_execute_command('ECHO', sudoable=True) + action_base._low_level_execute_command(b'ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) + action_base._low_level_execute_command(b'ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ class TestActionBase(unittest.TestCase): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) + action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 010839aedc5d903b7ef2fac1b564642cd036e95e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 22 Dec 2015 17:15:58 -0500 Subject: [PATCH 0248/1113] fix no_log disclosure when using aliases --- lib/ansible/module_utils/basic.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 4aee3b4169d..91ea874d859 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -516,6 +516,7 @@ class AnsibleModule(object): self._debug = False self.aliases = {} + self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): @@ -524,6 +525,14 @@ class AnsibleModule(object): self.params = self._load_params() + # append to legal_inputs and then possibly check against them + try: + self.aliases = self._handle_aliases() + except Exception, e: + # use exceptions here cause its not safe to call vail json until no_log is processed + print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) + sys.exit(1) + # Save parameter values that should never be logged self.no_log_values = set() # Use the argspec to determine which args are no_log @@ -538,10 +547,6 @@ class AnsibleModule(object): # reset to LANG=C if it's an invalid/unavailable locale self._check_locale() - self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] - - # append to legal_inputs and then possibly check against them - self.aliases = self._handle_aliases() self._check_arguments(check_invalid_arguments) @@ -1064,6 +1069,7 @@ class AnsibleModule(object): self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) def _handle_aliases(self): + # this uses exceptions as it happens before we can safely call 
fail_json aliases_results = {} #alias:canon for (k,v) in self.argument_spec.items(): self._legal_inputs.append(k) @@ -1072,11 +1078,11 @@ class AnsibleModule(object): required = v.get('required', False) if default is not None and required: # not alias specific but this is a good place to check this - self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k) + raise Exception("internal error: required and default are mutually exclusive for %s" % k) if aliases is None: continue if type(aliases) != list: - self.fail_json(msg='internal error: aliases must be a list') + raise Exception('internal error: aliases must be a list') for alias in aliases: self._legal_inputs.append(alias) aliases_results[alias] = k From 202b92179d247e508fe4190edc28614b136a5b89 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 22 Dec 2015 22:09:45 -0500 Subject: [PATCH 0249/1113] corrected role path search order the unfraking was matching roles in current dir as it always returns a full path, pushed to the bottom as match of last resort fixes #13645 --- lib/ansible/playbook/role/definition.py | 70 ++++++++++++------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index 7e8f47e9be8..0af49cec91c 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -135,46 +135,44 @@ class RoleDefinition(Base, Become, Conditional, Taggable): append it to the default role path ''' - role_path = unfrackpath(role_name) + # we always start the search for roles in the base directory of the playbook + role_search_paths = [ + os.path.join(self._loader.get_basedir(), u'roles'), + self._loader.get_basedir(), + ] + # also search in the configured roles path + if C.DEFAULT_ROLES_PATH: + configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) + role_search_paths.extend(configured_paths) + + # finally, append the roles basedir, if it was set, so we can + # search relative to that directory for dependent roles + if self._role_basedir: + role_search_paths.append(self._role_basedir) + + # create a templar class to template the dependency names, in + # case they contain variables + if self._variable_manager is not None: + all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) + else: + all_vars = dict() + + templar = Templar(loader=self._loader, variables=all_vars) + role_name = templar.template(role_name) + + # now iterate through the possible paths and return the first one we find + for path in role_search_paths: + path = templar.template(path) + role_path = unfrackpath(os.path.join(path, role_name)) + if self._loader.path_exists(role_path): + return (role_name, role_path) + + # if not found elsewhere try to extract path from name + role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, role_path) - else: - # we always start the search for roles in the base directory of the playbook - role_search_paths = [ - os.path.join(self._loader.get_basedir(), u'roles'), - u'./roles', - self._loader.get_basedir(), - u'./' - ] - - # also search in the configured roles path - if C.DEFAULT_ROLES_PATH: - configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) - role_search_paths.extend(configured_paths) - - # finally, append the roles basedir, if it was set, so we can - # search relative to that directory for dependent roles - if self._role_basedir: 
- role_search_paths.append(self._role_basedir) - - # create a templar class to template the dependency names, in - # case they contain variables - if self._variable_manager is not None: - all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) - else: - all_vars = dict() - - templar = Templar(loader=self._loader, variables=all_vars) - role_name = templar.template(role_name) - - # now iterate through the possible paths and return the first one we find - for path in role_search_paths: - path = templar.template(path) - role_path = unfrackpath(os.path.join(path, role_name)) - if self._loader.path_exists(role_path): - return (role_name, role_path) raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds) From 957b376f9eb959f4f3627a622f7776a26442bf9c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 22 Dec 2015 22:45:25 -0500 Subject: [PATCH 0250/1113] better module error handling * now module errors clearly state msg=MODULE FAILURE * module's stdout and stderr go into module_stdout and module_stderr keys which only appear during parsing failure * invocation module_args are deleted from results provided by action plugin as errors can keep us from overwriting and then disclosing info that was meant to be kept hidden due to no_log * fixed invocation module_args set by basic.py as it was creating different keys as the invocation in action plugin base. * results now merge --- lib/ansible/module_utils/basic.py | 4 ++-- lib/ansible/plugins/action/__init__.py | 5 +++-- lib/ansible/plugins/action/normal.py | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d859..0391035e883 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1530,7 +1530,7 @@ class AnsibleModule(object): if not 'changed' in kwargs: kwargs['changed'] = False if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1542,7 +1542,7 @@ class AnsibleModule(object): assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 765ba663164..5383f8afd43 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -460,9 +460,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): if 'stderr' in res and res['stderr'].startswith(u'Traceback'): data['exception'] = res['stderr'] else: - data['msg'] = res.get('stdout', u'') + data['msg'] = "MODULE FAILURE" + data['module_stdout'] = res.get('stdout', u'') if 'stderr' in res: - data['msg'] += res['stderr'] + data['module_stderr'] = res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index f9b55e1ff57..932ad8309c3 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -18,6 +18,7 @@ 
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase +from ansible.utils.vars import merge_hash class ActionModule(ActionBase): @@ -27,7 +28,9 @@ class ActionModule(ActionBase): task_vars = dict() results = super(ActionModule, self).run(tmp, task_vars) - results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) + # remove as modules might hide due to nolog + del results['invocation']['module_args'] + results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars)) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. From 809c9af68cac56180b336d6ebe29d70b9d10ac14 Mon Sep 17 00:00:00 2001 From: Matt Roberts <mattroberts297@gmail.com> Date: Wed, 23 Dec 2015 08:18:46 +0000 Subject: [PATCH 0251/1113] Update playbooks_intro.rst If you follow the documentation through in order you shouldn't have read about modules yet. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 28c809f0132..55cd3359be6 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -41,7 +41,7 @@ Each playbook is composed of one or more 'plays' in a list. The goal of a play is to map a group of hosts to some well defined roles, represented by things ansible calls tasks. At a basic level, a task is nothing more than a call -to an ansible module, which you should have learned about in earlier chapters. +to an ansible module (see :doc:`Modules`). By composing a playbook of multiple 'plays', it is possible to orchestrate multi-machine deployments, running certain steps on all From 42b9a206ada579000a64cdcb7a0c82ecfd99c451 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Wed, 23 Dec 2015 11:44:30 +0100 Subject: [PATCH 0252/1113] Fix last commit, make it python3 compatible (and py24) --- lib/ansible/module_utils/basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d859..f9dc964e676 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -528,7 +528,8 @@ class AnsibleModule(object): # append to legal_inputs and then possibly check against them try: self.aliases = self._handle_aliases() - except Exception, e: + except Exception: + e = get_exception() # use exceptions here cause its not safe to call vail json until no_log is processed print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) sys.exit(1) From b201cf2ee13a9e4e1c5dc222043e3f1c84940044 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 23 Dec 2015 10:29:59 -0500 Subject: [PATCH 0253/1113] switched from pythonic None to generic null --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 141f81bd08b..d3781b2f7fd 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -481,7 +481,7 @@ Module checklist * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work * Documentation: Make sure it exists * `required` should always be present, be it true or false - * If `required` is false you need to document 
`default`, even if the default is 'None' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. + * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. * `default` is not needed for `required: true` * Remove unnecessary doc like `aliases: []` or `choices: []` * The version is not a float number and value the current development version From d89d7951e6fb84cdb04cc35e0aa962d59fe6f553 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 23 Dec 2015 11:45:07 -0500 Subject: [PATCH 0254/1113] fixed tests to follow new invocation structure also added maxdiff setting to see issues clearly when they happen --- .../module_utils/basic/test_exit_json.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 931447f8ab6..27bbb0f9e56 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -31,8 +31,11 @@ from ansible.module_utils import basic from ansible.module_utils.basic import heuristic_log_sanitize from ansible.module_utils.basic import return_values, remove_values +empty_invocation = {u'module_args': {}} + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitJson(unittest.TestCase): + def setUp(self): self.COMPLEX_ARGS = basic.MODULE_COMPLEX_ARGS basic.MODULE_COMPLEX_ARGS = '{}' @@ -56,7 +59,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False, invocation={})) + self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation)) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +70,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation)) def test_fail_json_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -78,13 +81,13 @@ class TestAnsibleModuleExitJson(unittest.TestCase): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation)) def test_exit_json_proper_changed(self): with self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation)) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -95,21 +98,21 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, 
url='https://username:password12345@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), ) @@ -122,6 +125,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): sys.stdout = self.old_stdout def test_exit_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: sys.stdout = StringIO() basic.MODULE_COMPLEX_ARGS = json.dumps(args) @@ -137,6 +141,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): self.assertEquals(json.loads(sys.stdout.getvalue()), expected) def test_fail_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: expected = copy.deepcopy(expected) del expected['changed'] From 630a35adb0752dd9a4d74539b91b243bafb4c7d7 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Wed, 23 Dec 2015 14:57:24 -0600 Subject: [PATCH 0255/1113] Add ProxyCommand support to the paramiko connection plugin --- docsite/rst/intro_configuration.rst | 11 +++++++++++ lib/ansible/constants.py | 1 + .../plugins/connection/paramiko_ssh.py | 19 +++++++++++++++++++ 3 files changed, 31 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ccfb456ed93..7f21c2e1f61 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -739,6 +739,17 @@ instead. Setting it to False will improve performance and is recommended when h record_host_keys=True +.. _paramiko_proxy_command + +proxy_command +============= + +.. versionadded:: 2.1 + +Use an OpenSSH like ProxyCommand for proxying all Paramiko SSH connections through a bastion or jump host. Requires a minimum of Paramiko version 1.9.0. On Enterprise Linux 6 this is provided by ``python-paramiko1.10`` in the EPEL repository:: + + proxy_command = ssh -W "%h:%p" bastion + .. 
_openssh_settings: OpenSSH Specific Settings diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5df9602246a..7d6a76a19e3 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -244,6 +244,7 @@ ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) +PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None) # obsolete -- will be formally removed diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index ab9ce90db95..ea6ca3809d1 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -158,6 +158,24 @@ class Connection(ConnectionBase): pass # file was not found, but not required to function ssh.load_system_host_keys() + sock_kwarg = {} + if C.PARAMIKO_PROXY_COMMAND: + replacers = { + '%h': self._play_context.remote_addr, + '%p': port, + '%r': self._play_context.remote_user + } + proxy_command = C.PARAMIKO_PROXY_COMMAND + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, str(replace)) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. ' + 'Not using configured ProxyCommand') + ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self)) allow_agent = True @@ -179,6 +197,7 @@ class Connection(ConnectionBase): password=self._play_context.password, timeout=self._play_context.timeout, port=port, + **sock_kwarg ) except Exception as e: msg = str(e) From fd7e01696f659e1a147887087c87e2bad9742209 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 23 Dec 2015 17:16:21 -0500 Subject: [PATCH 0256/1113] updated submodule refs to pick up module changes --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fcb3397df79..002028748f0 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fcb3397df7944ff15ea698b5717c06e8fc7d43ba +Subproject commit 002028748f080961ade801c30e194bfd4ba043ce diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index c6829752d85..19e496c69c2 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit c6829752d852398c255704cd5d7faa54342e143e +Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 From deac4d00b22f9e0288f5e3c4633e07a7f937d47c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 24 Dec 2015 11:32:40 -0800 Subject: [PATCH 0257/1113] bigip changes as requested by bcoca and abadger: * Fix to error if validate_cert is True and python doesn't support it. * Only globally disable certificate checking if really needed. Use bigip verify parameter if available instead. 
* Remove public disable certificate function to make it less likely people will attempt to reuse that --- lib/ansible/module_utils/f5.py | 36 ++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py index e04e6b2f1ec..ba336377e7d 100644 --- a/lib/ansible/module_utils/f5.py +++ b/lib/ansible/module_utils/f5.py @@ -51,19 +51,35 @@ def f5_argument_spec(): def f5_parse_arguments(module): if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") - if not module.params['validate_certs']: - disable_ssl_cert_validation() + + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task') + return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs']) -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api +def bigip_api(bigip, user, password, validate_certs): + try: + # bigsuds >= 1.0.3 + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs) + except TypeError: + # bigsuds < 1.0.3, no verify param + if validate_certs: + # Note: verified we have SSLContext when we parsed params + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + else: + import ssl + if hasattr(ssl, 'SSLContext'): + # Really, you should never do this. It disables certificate + # verification *globally*. But since older bigip libraries + # don't give us a way to toggle verification we need to + # disable it at the global level. + # From https://www.python.org/dev/peps/pep-0476/#id29 + ssl._create_default_https_context = ssl._create_unverified_context + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context + return api # Fully Qualified name (with the partition) def fq_name(partition,name): From a8e0763d1ec003e5f83c1d848578f7a0a02c9df4 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 24 Dec 2015 15:00:53 -0600 Subject: [PATCH 0258/1113] Move _split_args from ssh.py to ConnectionBase so we can use it in other connection plugins --- lib/ansible/plugins/connection/__init__.py | 11 +++++++++++ lib/ansible/plugins/connection/ssh.py | 14 ++------------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4ca..4b6c17dc32c 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,6 +23,7 @@ __metaclass__ = type import fcntl import gettext import os +import shlex from abc import ABCMeta, abstractmethod, abstractproperty from functools import wraps @@ -31,6 +32,7 @@ from ansible.compat.six import with_metaclass from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader +from ansible.utils.unicode import to_bytes, to_unicode try: from __main__ import display @@ -112,6 +114,15 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): ''' pass + @staticmethod + def _split_ssh_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + @abstractproperty def transport(self): """String used to identify this Connection class from other classes""" diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index a2abcf20aee..3da701aa8e4 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -24,7 +24,6 @@ import os import pipes import pty import select -import shlex import subprocess import time @@ -100,15 +99,6 @@ class Connection(ConnectionBase): return controlpersist, controlpath - @staticmethod - def _split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. - """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -157,7 +147,7 @@ class Connection(ConnectionBase): # Next, we add [ssh_connection]ssh_args from ansible.cfg. 
if self._play_context.ssh_args: - args = self._split_args(self._play_context.ssh_args) + args = self._split_ssh_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -210,7 +200,7 @@ class Connection(ConnectionBase): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = self._split_args(attr) + args = self._split_ssh_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't From 0296209bc139d00d696a9d0722bee01f3bf99c2d Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 24 Dec 2015 15:01:41 -0600 Subject: [PATCH 0259/1113] Parse ansible_ssh_common_args looking for ProxyCommand, for use in paramiko --- .../plugins/connection/paramiko_ssh.py | 28 +++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index ea6ca3809d1..47028a60a5a 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -32,6 +32,7 @@ import tempfile import traceback import fcntl import sys +import re from termios import tcflush, TCIFLUSH from binascii import hexlify @@ -55,6 +56,9 @@ The %s key fingerprint is %s. Are you sure you want to continue connecting (yes/no)? """ +# SSH Options Regex +SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)') + # prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/ HAVE_PARAMIKO=False with warnings.catch_warnings(): @@ -158,14 +162,34 @@ class Connection(ConnectionBase): pass # file was not found, but not required to function ssh.load_system_host_keys() + proxy_command = None + # Parse ansible_ssh_common_args, specifically looking for ProxyCommand + ssh_common_args = getattr(self._play_context, 'ssh_common_args', None) + if ssh_common_args is not None: + args = self._split_ssh_args(ssh_common_args) + for i, arg in enumerate(args): + if arg.lower() == 'proxycommand': + # _split_ssh_args split ProxyCommand from the command itself + proxy_command = args[i + 1] + else: + # ProxyCommand and the command itself are a single string + match = SETTINGS_REGEX.match(arg) + if match: + if match.group(1).lower() == 'proxycommand': + proxy_command = match.group(2) + + if proxy_command: + break + + proxy_command = proxy_command or C.PARAMIKO_PROXY_COMMAND + sock_kwarg = {} - if C.PARAMIKO_PROXY_COMMAND: + if proxy_command: replacers = { '%h': self._play_context.remote_addr, '%p': port, '%r': self._play_context.remote_user } - proxy_command = C.PARAMIKO_PROXY_COMMAND for find, replace in replacers.items(): proxy_command = proxy_command.replace(find, str(replace)) try: From 2587edb4f31390f51678bfaa2764146a16ed2841 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 24 Dec 2015 15:10:42 -0600 Subject: [PATCH 0260/1113] Move proxycommand parsing into _parse_proxy_command --- .../plugins/connection/paramiko_ssh.py | 47 ++++++++++--------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 47028a60a5a..21dfe0c7bc3 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -141,27 +141,7 @@ class Connection(ConnectionBase): self.ssh = 
SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached() return self - def _connect_uncached(self): - ''' activates the connection object ''' - - if not HAVE_PARAMIKO: - raise AnsibleError("paramiko is not installed") - - port = self._play_context.port or 22 - display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr) - - ssh = paramiko.SSHClient() - - self.keyfile = os.path.expanduser("~/.ssh/known_hosts") - - if C.HOST_KEY_CHECKING: - try: - #TODO: check if we need to look at several possible locations, possible for loop - ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts") - except IOError: - pass # file was not found, but not required to function - ssh.load_system_host_keys() - + def _parse_proxy_command(self, port=22): proxy_command = None # Parse ansible_ssh_common_args, specifically looking for ProxyCommand ssh_common_args = getattr(self._play_context, 'ssh_common_args', None) @@ -200,6 +180,31 @@ class Connection(ConnectionBase): 'Please upgrade to Paramiko 1.9.0 or newer. ' 'Not using configured ProxyCommand') + return sock_kwarg + + def _connect_uncached(self): + ''' activates the connection object ''' + + if not HAVE_PARAMIKO: + raise AnsibleError("paramiko is not installed") + + port = self._play_context.port or 22 + display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr) + + ssh = paramiko.SSHClient() + + self.keyfile = os.path.expanduser("~/.ssh/known_hosts") + + if C.HOST_KEY_CHECKING: + try: + #TODO: check if we need to look at several possible locations, possible for loop + ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts") + except IOError: + pass # file was not found, but not required to function + ssh.load_system_host_keys() + + sock_kwarg = self._parse_proxy_command(port) + ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self)) allow_agent = True From cd9e18d0e52c1915132614e6e2946a26968e3091 Mon Sep 17 00:00:00 2001 From: Stephen Medina <github@lilmail.xyz> Date: Fri, 25 Dec 2015 08:56:08 -0800 Subject: [PATCH 0261/1113] clarify idempotence explanation Small typo; wasn't sure what to replace it with. --- docsite/rst/intro_adhoc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 9e104d5836f..61ba33523a6 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -112,7 +112,7 @@ For example, using double rather than single quotes in the above example would evaluate the variable on the box you were on. So far we've been demoing simple command execution, but most Ansible modules usually do not work like -simple scripts. They make the remote system look like you state, and run the commands necessary to +simple scripts. They make the remote system look like a state, and run the commands necessary to get it there. This is commonly referred to as 'idempotence', and is a core design goal of Ansible. However, we also recognize that running arbitrary commands is equally important, so Ansible easily supports both. 
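Note on the ProxyCommand handling added in the paramiko patches above: the parsing that ends up in ``_parse_proxy_command`` boils down to scanning the ``ansible_ssh_common_args`` tokens for either a two-token ``ProxyCommand <value>`` pair or a single ``ProxyCommand=<value>`` token, falling back to the ``proxy_command`` setting from ansible.cfg. A rough standalone sketch of that idea (the helper name and sample arguments below are illustrative only, not the plugin code itself)::

    import re
    import shlex

    # Same "Key=Value" / "Key Value" pattern idea as SETTINGS_REGEX in the patch.
    SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')

    def find_proxy_command(ssh_common_args):
        # Split the argument string roughly the way _split_ssh_args does,
        # dropping empty elements.
        args = [x.strip() for x in shlex.split(ssh_common_args) if x.strip()]
        for i, arg in enumerate(args):
            if arg.lower() == 'proxycommand':
                # '-o ProxyCommand <command>' form: the value is the next token.
                return args[i + 1]
            match = SETTINGS_REGEX.match(arg)
            if match and match.group(1).lower() == 'proxycommand':
                # '-o ProxyCommand=<command>' form: the value is in the same token.
                return match.group(2)
        return None

    print(find_proxy_command('-o ProxyCommand="ssh -W %h:%p bastion" -o ControlMaster=auto'))
    # prints: ssh -W %h:%p bastion

The ``%h``, ``%p`` and ``%r`` placeholders are then substituted with the remote host, port and user before the string is handed to ``paramiko.ProxyCommand``.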
From d70a97b562da1b06d21a86fd1c7619bfa2b6a2e6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 25 Dec 2015 12:17:22 -0800 Subject: [PATCH 0262/1113] Update submodule refs --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 19e496c69c2..f6a7b6dd1f7 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 +Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f From 0b92abaf67de53349bb4d2733f49750d9a4d8277 Mon Sep 17 00:00:00 2001 From: Etherdaemon <kaz.cheng@gmail.com> Date: Sun, 27 Dec 2015 21:31:59 +1000 Subject: [PATCH 0263/1113] Proposed fix for ansible/ansible-modules-extras#1348 due to datetime.datetime type not being matched --- lib/ansible/module_utils/basic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6fd382aa490..89d595a0bf3 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -65,6 +65,7 @@ import grp import pwd import platform import errno +import datetime from itertools import repeat, chain try: @@ -423,10 +424,13 @@ def remove_values(value, no_log_strings): for omit_me in no_log_strings: if omit_me in stringy_value: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + elif isinstance(value, datetime.datetime): + value = value.isoformat() else: raise TypeError('Value of unknown type: %s, %s' % (type(value), value)) return value + def heuristic_log_sanitize(data, no_log_values=None): ''' Remove strings that look like passwords from log messages ''' # Currently filters: From c489b271d152820ab11b73d11877f8805318cd7a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 27 Dec 2015 14:17:20 -0500 Subject: [PATCH 0264/1113] updated release cycle to 4 months instead of 2 --- docsite/rst/intro_installation.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index e986ffd70f6..a5ed83a3027 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -27,12 +27,11 @@ What Version To Pick? ````````````````````` Because it runs so easily from source and does not require any installation of software on remote -machines, many users will actually track the development version. +machines, many users will actually track the development version. -Ansible's release cycles are usually about two months long. Due to this -short release cycle, minor bugs will generally be fixed in the next release versus maintaining -backports on the stable branch. Major bugs will still have maintenance releases when needed, though -these are infrequent. +Ansible's release cycles are usually about four months long. Due to this short release cycle, +minor bugs will generally be fixed in the next release versus maintaining backports on the stable branch. +Major bugs will still have maintenance releases when needed, though these are infrequent. If you are wishing to run the latest released version of Ansible and you are running Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu, we recommend using the OS package manager. 
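A quick illustration of why the ``datetime`` branch added to ``remove_values()`` above is needed: the function walks module return data scrubbing ``no_log`` values, and any value of an unrecognised type used to raise ``TypeError``; a ``datetime.datetime`` can simply be serialised to its ISO form instead. A much-reduced sketch of the behaviour (handling only strings and datetimes, with an assumed helper name ``scrub``; the real function also handles bytes, dicts, lists and other types)::

    import datetime

    def scrub(value, no_log_strings):
        # String values are checked against the no_log set, as in remove_values().
        if isinstance(value, str):
            for omit_me in no_log_strings:
                if omit_me in value:
                    return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
            return value
        elif isinstance(value, datetime.datetime):
            # The new branch: serialise instead of raising TypeError.
            return value.isoformat()
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))

    print(scrub(datetime.datetime(2015, 12, 27, 21, 31, 59), set(['secret'])))
    # prints: 2015-12-27T21:31:59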
From 20005660313b5abc4188704fc3a37a4c25f83e62 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 28 Dec 2015 10:24:28 -0500 Subject: [PATCH 0265/1113] minor fix to become docs --- docsite/rst/become.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 64628515c6c..7597643f883 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -1,5 +1,5 @@ -Ansible Privilege Escalation -++++++++++++++++++++++++++++ +Become (Privilege Escalation) ++++++++++++++++++++++++++++++ Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. @@ -7,17 +7,17 @@ Ansible can use existing privilege escalation systems to allow a user to execute Become `````` -Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user -and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still -being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker), -pfexec and others. +Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user +and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still +being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), +`pfexec` and others. New directives -------------- become - equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation + equivalent to adding `sudo:` or `su:` to a play or task, set to 'true'/'yes' to activate privilege escalation become_user equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges From 56454d6a9135fb18e5d0545b9162b940cbcb8a78 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 28 Dec 2015 12:25:27 -0500 Subject: [PATCH 0266/1113] added newer vars to 'reset_vars' these vars pass back info to the task about the connection moved to their own block at start at file for readability and added the newer standard vars --- lib/ansible/playbook/play_context.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500adf..6b19f4c1723 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -125,6 +125,18 @@ TASK_ATTRIBUTE_OVERRIDES = ( 'remote_user', ) +RESET_VARS = ( + 'ansible_connection', + 'ansible_ssh_host', + 'ansible_ssh_pass', + 'ansible_ssh_port', + 'ansible_ssh_user', + 'ansible_ssh_private_key_file', + 'ansible_ssh_pipelining', + 'ansible_user', + 'ansible_host', + 'ansible_port', +) class PlayContext(Base): @@ -505,7 +517,8 @@ class PlayContext(Base): # TODO: should we be setting the more generic values here rather than # the more specific _ssh_ ones? 
- for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']: + for special_var in RESET_VARS: + if special_var not in variables: for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): if special_var in varnames: From 2d11cfab92f9d26448461b4bc81f466d1910a15e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 29 Dec 2015 11:40:18 -0500 Subject: [PATCH 0267/1113] Squashed commit of the following: commit 24efa310b58c431b4d888a6315d1285da918f670 Author: James Cammarata <jimi@sngx.net> Date: Tue Dec 29 11:23:52 2015 -0500 Adding an additional test for copy exclusion Adds a negative test for the situation when an exclusion doesn't exist in the target to be copied. commit 643ba054877cf042177d65e6e2958178bdd2fe88 Merge: e6ee59f 66a8f7e Author: James Cammarata <jimi@sngx.net> Date: Tue Dec 29 10:59:18 2015 -0500 Merge branch 'speedup' of https://github.com/chrismeyersfsu/ansible into chrismeyersfsu-speedup commit 66a8f7e873ca90f7848e47b04d9b62aed23a45df Author: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Mon Dec 28 09:47:00 2015 -0500 better api and tests added * _copy_results = deepcopy for better performance * _copy_results_exclude to deepcopy but exclude certain fields. Pop fields that do not need to be deep copied. Re-assign popped fields after deep copy so we don't modify the original, to be copied, object. * _copy_results_exclude unit tests commit 93490960ff4e75f38a7cc6f6d49f10f949f1a7da Author: Chris Meyers <chris.meyers.fsu@gmail.com> Date: Fri Dec 25 23:17:26 2015 -0600 remove uneeded deepcopy fields --- lib/ansible/plugins/callback/__init__.py | 19 ++++- test/units/plugins/callback/test_callback.py | 82 ++++++++++++++++++++ 2 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 test/units/plugins/callback/test_callback.py diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 7371fe0a51e..cc2a9ad0e75 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,9 +59,20 @@ class CallbackBase: version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) - def _copy_result(self, result): - ''' helper for callbacks, so they don't all have to include deepcopy ''' - return deepcopy(result) + ''' helper for callbacks, so they don't all have to include deepcopy ''' + _copy_result = deepcopy + + def _copy_result_exclude(self, result, exclude): + values = [] + for e in exclude: + values.append(getattr(result, e)) + setattr(result, e, None) + + result_copy = deepcopy(result) + for i,e in enumerate(exclude): + setattr(result, e, values[i]) + + return result_copy def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): @@ -130,7 +141,7 @@ class CallbackBase: def _process_items(self, result): for res in result._result['results']: - newres = self._copy_result(result) + newres = self._copy_result_exclude(result, ['_result']) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py new file mode 100644 index 00000000000..54964ac9df2 --- /dev/null +++ b/test/units/plugins/callback/test_callback.py @@ -0,0 +1,82 @@ +# (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com> +# +# 
This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import PY3 +from copy import deepcopy + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, mock_open + +from ansible.plugins.callback import CallbackBase +import ansible.plugins.callback as callish + +class TestCopyResultExclude(unittest.TestCase): + def setUp(self): + class DummyClass(): + def __init__(self): + self.bar = [ 1, 2, 3 ] + self.a = { + "b": 2, + "c": 3, + } + self.b = { + "c": 3, + "d": 4, + } + self.foo = DummyClass() + self.cb = CallbackBase() + + def tearDown(self): + pass + + def test_copy_logic(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + + def test_copy_deep(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertNotEqual(id(self.foo.bar), id(res.bar)) + + def test_no_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + self.assertEqual(self.foo.a, res.a) + self.assertEqual(self.foo.b, res.b) + + def test_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ['bar', 'b']) + self.assertIsNone(res.bar) + self.assertIsNone(res.b) + self.assertEqual(self.foo.a, res.a) + + def test_result_unmodified(self): + bar_id = id(self.foo.bar) + a_id = id(self.foo.a) + res = self.cb._copy_result_exclude(self.foo, ['bar', 'a']) + + self.assertEqual(self.foo.bar, [ 1, 2, 3 ]) + self.assertEqual(bar_id, id(self.foo.bar)) + + self.assertEqual(self.foo.a, dict(b=2, c=3)) + self.assertEqual(a_id, id(self.foo.a)) + + self.assertRaises(AttributeError, self.cb._copy_result_exclude, self.foo, ['a', 'c', 'bar']) + From d3deb24ead59d5fdbecad3c946848537f95772ad Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 29 Dec 2015 15:41:00 -0500 Subject: [PATCH 0268/1113] output color is now configurable --- examples/ansible.cfg | 11 ++++++ lib/ansible/cli/galaxy.py | 25 +++++++------- lib/ansible/constants.py | 11 ++++++ lib/ansible/executor/task_executor.py | 2 +- lib/ansible/playbook/__init__.py | 3 +- lib/ansible/plugins/callback/default.py | 46 ++++++++++++------------- lib/ansible/plugins/callback/minimal.py | 17 +++++---- lib/ansible/plugins/callback/oneline.py | 14 ++++---- lib/ansible/utils/color.py | 3 +- lib/ansible/utils/display.py | 14 ++++---- 10 files changed, 86 insertions(+), 60 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index ec3ddf20641..b357738b39c 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -262,3 +262,14 @@ # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependent context. 
#special_context_filesystems=nfs,vboxsf,fuse,ramfs + +[colors] +#verbose = blue +#warn = bright purple +#error = red +#debug = dark gray +#deprecate = purple +#skip = cyan +#unreachable = red +#ok = green +#changed = yellow diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 34afa03c9f7..476a7d0f897 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -514,7 +514,7 @@ class GalaxyCLI(CLI): tags=self.options.tags, author=self.options.author, page_size=page_size) if response['count'] == 0: - display.display("No roles match your search.", color="yellow") + display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = '' @@ -570,10 +570,10 @@ class GalaxyCLI(CLI): colors = { 'INFO': 'normal', - 'WARNING': 'yellow', - 'ERROR': 'red', - 'SUCCESS': 'green', - 'FAILED': 'red' + 'WARNING': C.COLOR_WARN, + 'ERROR': C.COLOR_ERROR, + 'SUCCESS': C.COLOR_OK, + 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: @@ -592,11 +592,10 @@ class GalaxyCLI(CLI): # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), color='yellow') - display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: - display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') - display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), - color='yellow') + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) @@ -633,17 +632,17 @@ class GalaxyCLI(CLI): # None found display.display("No integrations found.") return 0 - display.display(u'\n' + "ID Source Repo", color="green") - display.display("---------- ---------- ----------", color="green") + display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) + display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], - secret['github_repo']),color="green") + secret['github_repo']),color=C.COLOR_OK) return 0 if self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) - display.display("Secret removed. Integrations using this secret will not longer work.", color="green") + display.display("Secret removed. 
Integrations using this secret will not longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5df9602246a..9b84825d6bc 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -268,6 +268,17 @@ GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) +# colors +COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue') +COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple') +COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red') +COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray') +COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple') +COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan') +COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red') +COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green') +COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow') + # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index c8b6fa179bc..4a2d30a2cd2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -393,7 +393,7 @@ class TaskExecutor: result = None for attempt in range(retries): if attempt > 0: - display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color="dark gray") + display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG) result['attempts'] = attempt + 1 display.debug("running the handler") diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 0ae443f8436..947224d61fc 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -25,6 +25,7 @@ from ansible.errors import AnsibleParserError from ansible.playbook.play import Play from ansible.playbook.playbook_include import PlaybookInclude from ansible.plugins import get_all_plugin_loaders +from ansible import constants as C try: from __main__ import display @@ -87,7 +88,7 @@ class Playbook: if pb is not None: self._entries.extend(pb._entries) else: - display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan') + display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color=C.COLOR_SKIP) else: entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader) self._entries.append(entry_obj) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba5..421104ee837 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -44,7 +44,7 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] @@ -53,12 +53,12 @@ class CallbackModule(CallbackBase): self._process_items(result) else: if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') + self._display.display("...ignoring", color=C.COLOR_SKIP) def v2_runner_on_ok(self, result): @@ -71,13 +71,13 @@ class CallbackModule(CallbackBase): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK if result._task.loop and 'results' in result._result: self._process_items(result) @@ -97,17 +97,17 @@ class CallbackModule(CallbackBase): msg = "skipping: [%s]" % result._host.get_name() if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') + self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP) def v2_playbook_on_no_hosts_remaining(self): self._display.banner("NO MORE HOSTS LEFT") @@ -117,7 +117,7 @@ class CallbackModule(CallbackBase): if self._display.verbosity > 2: path = task.get_path() if path: - self._display.display("task path: %s" % path, color='dark gray') + self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) def v2_playbook_on_cleanup_task_start(self, task): self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) @@ -155,13 +155,13 @@ class CallbackModule(CallbackBase): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK msg += " => (item=%s)" % (result._result['item'],) @@ -179,15 +179,15 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) self._handle_warnings(result._result) @@ -195,12 +195,12 @@ class CallbackModule(CallbackBase): msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item']) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_include(self, included_file): msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + color = C.COLOR_SKIP + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_stats(self, stats): self._display.banner("PLAY RECAP") @@ -211,10 +211,10 @@ class CallbackModule(CallbackBase): self._display.display(u"%s : %s %s %s %s" % ( hostcolor(h, t), - colorize(u'ok', t['ok'], 'green'), - colorize(u'changed', t['changed'], 'yellow'), - colorize(u'unreachable', t['unreachable'], 'red'), - colorize(u'failed', t['failures'], 'red')), + colorize(u'ok', t['ok'], C.COLOR_OK), + 
colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR)), screen_only=True ) diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index 71f9f5dfeef..9fa257af747 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -53,29 +53,32 @@ class CallbackModule(CallbackBase): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR) else: - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') + self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green') + if 'changed' in result._result and result._result['changed']: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED) + else: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK) self._handle_warnings(result._result) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow') + self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE) def v2_on_file_diff(self, result): if 'diff' in result._result and result._result['diff']: diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py index a99b680c05c..0f6283fd441 100644 --- a/lib/ansible/plugins/callback/oneline.py +++ b/lib/ansible/plugins/callback/oneline.py @@ -52,24 +52,24 @@ class CallbackModule(CallbackBase): msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'].replace('\n','') if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR) else: - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red') + self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green') + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') + self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py index 55060ace040..81a05d749e1 100644 --- a/lib/ansible/utils/color.py +++ b/lib/ansible/utils/color.py @@ -62,7 +62,8 @@ codeCodes = { 'purple': u'0;35', 'bright red': u'1;31', 'yellow': u'0;33', 'bright purple': u'1;35', 'dark gray': u'1;30', 'bright yellow': u'1;33', - 'normal': u'0' + 'magenta': u'0;35', 'bright magenta': u'1;35', + 'normal': u'0' , } def stringc(text, color): diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 3d51f17de47..8700a510186 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -145,7 +145,7 @@ class Display: # characters that are invalid in the user's locale msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr)) - if color == 'red': + if color == C.COLOR_ERROR: logger.error(msg2) else: logger.info(msg2) @@ -168,7 +168,7 @@ class Display: def debug(self, msg): if C.DEFAULT_DEBUG: debug_lock.acquire() - self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray') + self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG) debug_lock.release() def verbose(self, msg, host=None, caplevel=2): @@ -176,9 +176,9 @@ class Display: #msg = utils.sanitize_output(msg) if self.verbosity > caplevel: if host is None: - self.display(msg, color='blue') + self.display(msg, color=C.COLOR_VERBOSE) else: - self.display("<%s> %s" % (host, msg), color='blue', screen_only=True) + self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, screen_only=True) def deprecated(self, msg, version=None, 
removed=False): ''' used to print out a deprecation message.''' @@ -199,7 +199,7 @@ class Display: new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._deprecations: - self.display(new_msg.strip(), color='purple', stderr=True) + self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True) self._deprecations[new_msg] = 1 def warning(self, msg): @@ -207,7 +207,7 @@ class Display: wrapped = textwrap.wrap(new_msg, self.columns) new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._warns: - self.display(new_msg, color='bright purple', stderr=True) + self.display(new_msg, color=C.COLOR_WARN, stderr=True) self._warns[new_msg] = 1 def system_warning(self, msg): @@ -258,7 +258,7 @@ class Display: else: new_msg = msg if new_msg not in self._errors: - self.display(new_msg, color='red', stderr=True) + self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 @staticmethod From 5accc9858739d2184235bf8722b83ff7bcc97056 Mon Sep 17 00:00:00 2001 From: mgarstecki <madmath@chamion.net> Date: Wed, 30 Dec 2015 11:57:12 +0100 Subject: [PATCH 0269/1113] Correction of a double negation The sentence seemed to imply that return codes from modules are significant, while they are not. The second part of the sentence confirms this, as it advises to use standard return codes only for future proofing. --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index d3781b2f7fd..5d664d56313 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -347,7 +347,7 @@ and guidelines: * In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'. -* Return codes from modules are not actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. +* Return codes from modules are actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. * As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form. From 946b82bef71d3b2d4ecf07ec937b650634bc84a0 Mon Sep 17 00:00:00 2001 From: Eric Feliksik <e.feliksik@nerdalize.com> Date: Wed, 30 Dec 2015 18:21:34 +0100 Subject: [PATCH 0270/1113] shred ansible-vault tmp_file. Also when editor is interruped. 
--- lib/ansible/parsing/vault/__init__.py | 35 ++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index d8cf66feca4..b7304d156fe 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,7 +219,27 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) - + + def _shred_file(self, tmp_path): + """securely destroy a decrypted file.""" + def generate_data(length): + import string, random + chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + + if not os.path.isfile(tmp_path): + # file is already gone + return + + ld = os.path.getsize(tmp_path) + passes = 3 + with open(tmp_path, "w") as fh: + for _ in range(int(passes)): + data = generate_data(ld) + fh.write(data) + fh.seek(0, 0) + os.remove(tmp_path) + def _edit_file_helper(self, filename, existing_data=None, force_save=False): # Create a tempfile @@ -229,12 +249,18 @@ class VaultEditor: self.write_data(existing_data, tmp_path) # drop the user into an editor on the tmp file - call(self._editor_shell_command(tmp_path)) + try: + call(self._editor_shell_command(tmp_path)) + except: + # whatever happens, destroy the decrypted file + self._shred_file(tmp_path) + raise + tmpdata = self.read_data(tmp_path) # Do nothing if the content has not changed if existing_data == tmpdata and not force_save: - os.remove(tmp_path) + self._shred_file(tmp_path) return # encrypt new data and write out to tmp @@ -329,7 +355,7 @@ class VaultEditor: sys.stdout.write(bytes) else: if os.path.isfile(filename): - os.remove(filename) + self._shred_file(filename) with open(filename, "wb") as fh: fh.write(bytes) @@ -338,6 +364,7 @@ class VaultEditor: # overwrite dest with src if os.path.isfile(dest): prev = os.stat(dest) + # old file 'dest' was encrypted, no need to _shred_file os.remove(dest) shutil.move(src, dest) From e39e8ba308364f16e3b74db96b15415ab97b5f52 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 30 Dec 2015 13:49:39 -0500 Subject: [PATCH 0271/1113] Fix logic mistake in unarchive action plugin --- lib/ansible/plugins/action/unarchive.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index cd89b936fed..b6c43a3c595 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -69,14 +69,14 @@ class ActionModule(ActionBase): source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source) remote_checksum = self._remote_checksum(dest, all_vars=task_vars) - if remote_checksum != '3': - result['failed'] = True - result['msg'] = "dest '%s' must be an existing dir" % dest - return result - elif remote_checksum == '4': + if remote_checksum == '4': result['failed'] = True result['msg'] = "python isn't present on the system. 
Unable to compute checksum" return result + elif remote_checksum != '3': + result['failed'] = True + result['msg'] = "dest '%s' must be an existing dir" % dest + return result if copy: # transfer the file to a remote tmp location From 5c34be15b1c800a513a88005c6e6b05f360dfef1 Mon Sep 17 00:00:00 2001 From: Thilo Uttendorfer <tlo@sengaya.de> Date: Thu, 31 Dec 2015 02:31:38 +0100 Subject: [PATCH 0272/1113] Fix unsupported format character --- lib/ansible/utils/module_docs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 4a90c3caca1..14a5d030565 100755 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -67,7 +67,7 @@ def get_docstring(filename, verbose=False): theid = t.id except AttributeError as e: # skip errors can happen when trying to use the normal code - display.warning("Failed to assign id for %t on %s, skipping" % (t, filename)) + display.warning("Failed to assign id for %s on %s, skipping" % (t, filename)) continue if 'DOCUMENTATION' in theid: From c4d2dbfcdbf8743760d658f1bcbec23e912514a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Fri, 1 Jan 2016 15:55:51 +0100 Subject: [PATCH 0273/1113] Replace to_string by to_unicode. Fix https://github.com/ansible/ansible/issues/13707 --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 095118e50eb..885005960f5 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -112,7 +112,7 @@ class Inventory(object): try: (host, port) = parse_address(h, allow_ranges=False) except AnsibleError as e: - display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e)) host = h port = None all.add_host(Host(host, port)) From 6f2f7a79b34910a75e6eafde5a7872b3e7bcb770 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 1 Jan 2016 21:52:41 -0500 Subject: [PATCH 0274/1113] add support for diff in file settings this allows modules to report on what specifically changed when using common file functions --- lib/ansible/module_utils/basic.py | 61 ++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6fd382aa490..1366bfceb40 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -751,7 +751,7 @@ class AnsibleModule(object): context = self.selinux_default_context(path) return self.set_context_if_different(path, context, False) - def set_context_if_different(self, path, context, changed): + def set_context_if_different(self, path, context, changed, diff=None): if not HAVE_SELINUX or not self.selinux_enabled(): return changed @@ -772,6 +772,14 @@ class AnsibleModule(object): new_context[i] = cur_context[i] if cur_context != new_context: + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['secontext'] = cur_context + if 'after' not in diff: + diff['after'] = {} + diff['after']['secontext'] = new_context + try: if self.check_mode: return True @@ -785,7 +793,7 @@ class AnsibleModule(object): changed = True return changed - def set_owner_if_different(self, path, owner, changed): + def set_owner_if_different(self, path, 
owner, changed, diff=None): path = os.path.expanduser(path) if owner is None: return changed @@ -798,6 +806,15 @@ class AnsibleModule(object): except KeyError: self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner) if orig_uid != uid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['owner'] = orig_uid + if 'after' not in diff: + diff['after'] = {} + diff['after']['owner'] = uid + if self.check_mode: return True try: @@ -807,7 +824,7 @@ class AnsibleModule(object): changed = True return changed - def set_group_if_different(self, path, group, changed): + def set_group_if_different(self, path, group, changed, diff=None): path = os.path.expanduser(path) if group is None: return changed @@ -820,6 +837,15 @@ class AnsibleModule(object): except KeyError: self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group) if orig_gid != gid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['group'] = orig_gid + if 'after' not in diff: + diff['after'] = {} + diff['after']['group'] = gid + if self.check_mode: return True try: @@ -829,7 +855,7 @@ class AnsibleModule(object): changed = True return changed - def set_mode_if_different(self, path, mode, changed): + def set_mode_if_different(self, path, mode, changed, diff=None): path = os.path.expanduser(path) path_stat = os.lstat(path) @@ -851,6 +877,15 @@ class AnsibleModule(object): prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['mode'] = prev_mode + if 'after' not in diff: + diff['after'] = {} + diff['after']['mode'] = mode + if self.check_mode: return True # FIXME: comparison against string above will cause this to be executed @@ -984,27 +1019,27 @@ class AnsibleModule(object): or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm] return reduce(or_reduce, perms, 0) - def set_fs_attributes_if_different(self, file_args, changed): + def set_fs_attributes_if_different(self, file_args, changed, diff=None): # set modes owners and context as needed changed = self.set_context_if_different( - file_args['path'], file_args['secontext'], changed + file_args['path'], file_args['secontext'], changed, diff ) changed = self.set_owner_if_different( - file_args['path'], file_args['owner'], changed + file_args['path'], file_args['owner'], changed, diff ) changed = self.set_group_if_different( - file_args['path'], file_args['group'], changed + file_args['path'], file_args['group'], changed, diff ) changed = self.set_mode_if_different( - file_args['path'], file_args['mode'], changed + file_args['path'], file_args['mode'], changed, diff ) return changed - def set_directory_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_directory_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) - def set_file_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_file_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) def add_path_info(self, kwargs): ''' From 210cf06d9ac8e62b15d6f34e9c63c1b98986a1d5 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 2 Jan 2016 00:31:09 -0500 Subject: [PATCH 0275/1113] Tweak 
how strategies evaluate failed hosts via the iterator and bug fixes * Added additional methods to the iterator code to assess host failures while also taking into account the block rescue/always states * Fixed bugs in the free strategy, where results were not always being processed after being collected * Added some prettier printing to the state output from iterator Fixes #13699 --- lib/ansible/executor/play_iterator.py | 46 ++++++++++++++++++++++++-- lib/ansible/plugins/strategy/free.py | 12 ++----- lib/ansible/plugins/strategy/linear.py | 5 +-- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 534f216c30a..147e46e5aa7 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -57,14 +57,32 @@ class HostState: self.always_child_state = None def __repr__(self): - return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % ( + def _run_state_to_string(n): + states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"] + try: + return states[n] + except IndexError: + return "UNKNOWN STATE" + + def _failed_state_to_string(n): + states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"} + if n == 0: + return "FAILED_NONE" + else: + ret = [] + for i in (1, 2, 4, 8): + if n & i: + ret.append(states[i]) + return "|".join(ret) + + return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % ( self.cur_block, self.cur_regular_task, self.cur_rescue_task, self.cur_always_task, self.cur_role, - self.run_state, - self.fail_state, + _run_state_to_string(self.run_state), + _failed_state_to_string(self.fail_state), self.pending_setup, self.tasks_child_state, self.rescue_child_state, @@ -347,6 +365,28 @@ class PlayIterator: def get_failed_hosts(self): return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) + def _check_failed_state(self, state): + if state is None: + return False + elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state): + return True + elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state): + return True + elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state): + return True + elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE: + if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0: + return False + elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0: + return False + else: + return True + return False + + def is_failed(self, host): + s = self.get_host_state(host) + return self._check_failed_state(s) + def get_original_task(self, host, task): ''' Finds the task in the task list which matches the UUID of the given task. 
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index f4fc1226a1f..976d33abba0 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -78,7 +78,7 @@ class StrategyModule(StrategyBase): (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) - if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task: + if not iterator.is_failed(host) and host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done @@ -135,7 +135,7 @@ class StrategyModule(StrategyBase): if last_host == starting_host: break - results = self._process_pending_results(iterator) + results = self._wait_on_pending_results(iterator) host_results.extend(results) try: @@ -176,13 +176,7 @@ class StrategyModule(StrategyBase): display.debug("done adding collected blocks to iterator") # pause briefly so we don't spin lock - time.sleep(0.05) - - try: - results = self._wait_on_pending_results(iterator) - host_results.extend(results) - except Exception as e: - pass + time.sleep(0.001) # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 7bb227dbaea..bfa2c37ce43 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -54,7 +54,8 @@ class StrategyModule(StrategyBase): host_tasks = {} display.debug("building list of next tasks for hosts") for host in hosts: - host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) + if not iterator.is_failed(host): + host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) display.debug("done building task lists") num_setups = 0 @@ -98,7 +99,7 @@ class StrategyModule(StrategyBase): rvals = [] display.debug("starting to advance hosts") for host in hosts: - host_state_task = host_tasks[host.name] + host_state_task = host_tasks.get(host.name) if host_state_task is None: continue (s, t) = host_state_task From 7193d27acc7719b25b70eb4709964d0c93796162 Mon Sep 17 00:00:00 2001 From: Eric Feliksik <e.feliksik@nerdalize.com> Date: Mon, 4 Jan 2016 17:19:35 +0100 Subject: [PATCH 0276/1113] add os.fsync() so that the shredding data (hopefully) hits the drive --- lib/ansible/parsing/vault/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index b7304d156fe..1eca0cd5714 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -235,9 +235,10 @@ class VaultEditor: passes = 3 with open(tmp_path, "w") as fh: for _ in range(int(passes)): + fh.seek(0, 0) data = generate_data(ld) fh.write(data) - fh.seek(0, 0) + os.fsync(fh) os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 8599c566701582024c6eaeeb5cf52d249f48a49e Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Mon, 4 Jan 2016 17:46:40 +0100 Subject: [PATCH 0277/1113] Do not set 'changed' to True when using group_by Since group_by does not change the remote system in any way, there is no change. This also makes things more consistent with the set_fact plugin.
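The same convention applies to any action plugin that only does controller-side work (building groups, setting facts): since nothing on the managed host is modified, the task should report no change. The minimal plugin below is a hypothetical illustration of that convention, not part of this patch::

    # Hypothetical action plugin, for illustration only: like group_by and
    # set_fact it performs controller-side work, so it never reports a change.
    from ansible.plugins.action import ActionBase

    class ActionModule(ActionBase):

        TRANSFERS_FILES = False

        def run(self, tmp=None, task_vars=None):
            result = super(ActionModule, self).run(tmp, task_vars)
            # ... purely local / in-memory work would happen here ...
            result['changed'] = False  # the remote system is never modified
            return result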
--- lib/ansible/plugins/action/group_by.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py index a891d3c70d5..99f9db2a88c 100644 --- a/lib/ansible/plugins/action/group_by.py +++ b/lib/ansible/plugins/action/group_by.py @@ -40,6 +40,6 @@ class ActionModule(ActionBase): group_name = self._task.args.get('key') group_name = group_name.replace(' ','-') - result['changed'] = True + result['changed'] = False result['add_group'] = group_name return result From 1e911375e850e79295d053f3e3c45c9d9d247159 Mon Sep 17 00:00:00 2001 From: Eric Feliksik <e.feliksik@nerdalize.com> Date: Mon, 4 Jan 2016 18:13:59 +0100 Subject: [PATCH 0278/1113] add docs, remove unnecessary int() cast --- lib/ansible/parsing/vault/__init__.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 1eca0cd5714..28e819860ae 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -221,7 +221,22 @@ class VaultEditor: self.vault = VaultLib(password) def _shred_file(self, tmp_path): - """securely destroy a decrypted file.""" + """Securely destroy a decrypted file + + Inspired by unix `shred', try to destroy the secrets "so that they can be + recovered only with great difficulty with specialised hardware, if at all". + + See https://github.com/ansible/ansible/pull/13700 . + + Note that: + - For flash: overwriting would have no effect (due to wear leveling). But the + added disk wear is considered insignificant. + - For other storage systems: the filesystem lies to the vfs (kernel), the disk + driver lies to the filesystem and the disk lies to the driver. But it's better + than nothing. + - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. + """ + def generate_data(length): import string, random chars = string.ascii_lowercase + string.ascii_uppercase + string.digits @@ -234,7 +249,7 @@ class VaultEditor: ld = os.path.getsize(tmp_path) passes = 3 with open(tmp_path, "w") as fh: - for _ in range(int(passes)): + for _ in range(passes): fh.seek(0, 0) data = generate_data(ld) fh.write(data) From de529c17340074b1d96937cf4d688da0a7e3bd31 Mon Sep 17 00:00:00 2001 From: "Fuentes, Christopher" <Christopher.Fuentes@ellucian.com> Date: Mon, 4 Jan 2016 13:52:06 -0500 Subject: [PATCH 0279/1113] minor grammar error was making me pull hair out --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 90b9a1cb09e..e51a1751fee 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -38,7 +38,7 @@ You can also dictate the connection type to be used, if you want:: foo.example.com bar.example.com -You may also wish to keep these in group variables instead, or file in them in a group_vars/<groupname> file. +You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file. See the rest of the documentation for more information about how to organize variables. .. 
_use_ssh: From 151e09d129d63ce485d42d3f6cf0915bb8bd8cee Mon Sep 17 00:00:00 2001 From: Eric Feliksik <e.feliksik@nerdalize.com> Date: Tue, 5 Jan 2016 01:34:45 +0100 Subject: [PATCH 0280/1113] use unix shred if possible, otherwise fast custom impl; do not shred encrypted file --- lib/ansible/parsing/vault/__init__.py | 90 ++++++++++++++++++--------- 1 file changed, 62 insertions(+), 28 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 28e819860ae..bcd038c8b8d 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,41 +219,67 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) + + def _shred_file_custom(self, tmp_path): + """"Destroy a file, when shred (core-utils) is not available - def _shred_file(self, tmp_path): - """Securely destroy a decrypted file + Unix `shred' destroys files "so that they can be recovered only with great difficulty with + specialised hardware, if at all". It is based on the method from the paper + "Secure Deletion of Data from Magnetic and Solid-State Memory", + Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996). - Inspired by unix `shred', try to destroy the secrets "so that they can be - recovered only with great difficulty with specialised hardware, if at all". + We do not go to that length to re-implement shred in Python; instead, overwriting with a block + of random data should suffice. See https://github.com/ansible/ansible/pull/13700 . - - Note that: - - For flash: overwriting would have no effect (due to wear leveling). But the - added disk wear is considered insignificant. - - For other storage systems: the filesystem lies to the vfs (kernel), the disk - driver lies to the filesystem and the disk lies to the driver. But it's better - than nothing. - - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. """ - def generate_data(length): - import string, random - chars = string.ascii_lowercase + string.ascii_uppercase + string.digits - return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + file_len = os.path.getsize(tmp_path) + + passes = 3 + with open(tmp_path, "wb") as fh: + for _ in range(passes): + fh.seek(0, 0) + # get a random chunk of data + data = os.urandom(min(1024*1024*2, file_len)) + bytes_todo = file_len + while bytes_todo > 0: + chunk = data[:bytes_todo] + fh.write(chunk) + bytes_todo -= len(chunk) + + assert(fh.tell() == file_len) + os.fsync(fh) + + + def _shred_file(self, tmp_path): + """Securely destroy a decrypted file + + Note standard limitations of GNU shred apply (For flash, overwriting would have no effect + due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never + guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), + it is a non-issue. + + Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is + a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on + a custom shredding method. + """ if not os.path.isfile(tmp_path): # file is already gone return + + try: + r = call(['shred', tmp_path]) + except OSError as e: + # shred is not available on this system, or some other error occured. + self._shred_file_custom(tmp_path) + r = 0 + + if r != 0: + # we could not successfully execute unix shred; therefore, do custom shred. 
+ self._shred_file_custom(tmp_path) - ld = os.path.getsize(tmp_path) - passes = 3 - with open(tmp_path, "w") as fh: - for _ in range(passes): - fh.seek(0, 0) - data = generate_data(ld) - fh.write(data) - os.fsync(fh) os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): @@ -262,7 +288,7 @@ class VaultEditor: _, tmp_path = tempfile.mkstemp() if existing_data: - self.write_data(existing_data, tmp_path) + self.write_data(existing_data, tmp_path, shred=False) # drop the user into an editor on the tmp file try: @@ -300,7 +326,7 @@ class VaultEditor: ciphertext = self.read_data(filename) plaintext = self.vault.decrypt(ciphertext) - self.write_data(plaintext, output_file or filename) + self.write_data(plaintext, output_file or filename, shred=False) def create_file(self, filename): """ create a new encrypted file """ @@ -365,13 +391,21 @@ class VaultEditor: return data - def write_data(self, data, filename): + def write_data(self, data, filename, shred=True): + """write data to given path + + if shred==True, make sure that the original data is first shredded so + that is cannot be recovered + """ bytes = to_bytes(data, errors='strict') if filename == '-': sys.stdout.write(bytes) else: if os.path.isfile(filename): - self._shred_file(filename) + if shred: + self._shred_file(filename) + else: + os.remove(filename) with open(filename, "wb") as fh: fh.write(bytes) From 0d7c3284595c34f53c903995b8dff5fc65303c89 Mon Sep 17 00:00:00 2001 From: John Mitchell <jlmitch5@ncsu.edu> Date: Mon, 4 Jan 2016 19:52:37 -0500 Subject: [PATCH 0281/1113] fixed css minification make target for docsite --- docsite/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/Makefile b/docsite/Makefile index 92129f78514..15347f84bf9 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -43,4 +43,4 @@ modules: $(FORMATTER) ../hacking/templates/rst.j2 PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ staticmin: - cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css + cat _themes/srtd/static/css/theme.css | sed -e 's/^[ ]*//g; s/[ ]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css From 692ef6dcc90cf696b4bc25bedb979150adf6e7b9 Mon Sep 17 00:00:00 2001 From: John Mitchell <jlmitch5@ncsu.edu> Date: Mon, 4 Jan 2016 19:58:51 -0500 Subject: [PATCH 0282/1113] made docsite ads configurable by marketing --- docsite/_themes/srtd/layout.html | 22 ++++++++++++---------- docsite/_themes/srtd/static/css/theme.css | 21 ++------------------- 2 files changed, 14 insertions(+), 29 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 16f0d8d2663..1408be8165d 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -166,7 +166,7 @@ <!-- changeable widget --> <center> <br/> -<a href="http://www.ansible.com/tower?utm_source=docs"> +<a href="http://www.ansible.com/docs-left?utm_source=docs"> <img style="border-width:0px;" src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-left-rail.png" /> </a> </center> @@ -189,15 +189,17 @@ <div class="wy-nav-content"> <div class="rst-content"> - <!-- Tower ads --> - <a class="DocSiteBanner" 
href="http://www.ansible.com/tower?utm_source=docs"> - <div class="DocSiteBanner-imgWrapper"> - <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-left.png"> - </div> - <div class="DocSiteBanner-imgWrapper"> - <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-right.png"> - </div> - </a> + <!-- Banner ads --> + <div class="DocSiteBanner"> + <a class="DocSiteBanner-imgWrapper" + href="http://www.ansible.com/docs-top?utm_source=docs"> + <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-left.png"> + </a> + <a class="DocSiteBanner-imgWrapper" + href="http://www.ansible.com/docs-top?utm_source=docs"> + <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-right.png"> + </a> + </div> {% include "breadcrumbs.html" %} <div id="page-content"> diff --git a/docsite/_themes/srtd/static/css/theme.css b/docsite/_themes/srtd/static/css/theme.css index 4f7cbc8caaf..246e513b799 100644 --- a/docsite/_themes/srtd/static/css/theme.css +++ b/docsite/_themes/srtd/static/css/theme.css @@ -4723,33 +4723,16 @@ span[id*='MathJax-Span'] { padding: 0.4045em 1.618em; } - .DocSiteBanner { - width: 100%; display: flex; display: -webkit-flex; + justify-content: center; + -webkit-justify-content: center; flex-wrap: wrap; -webkit-flex-wrap: wrap; - justify-content: space-between; - -webkit-justify-content: space-between; - background-color: #ff5850; margin-bottom: 25px; } .DocSiteBanner-imgWrapper { max-width: 100%; } - -@media screen and (max-width: 1403px) { - .DocSiteBanner { - width: 100%; - display: flex; - display: -webkit-flex; - flex-wrap: wrap; - -webkit-flex-wrap: wrap; - justify-content: center; - -webkit-justify-content: center; - background-color: #fff; - margin-bottom: 25px; - } -} From 1c3b16c2ddf42c687738687cbc1a708cd05d2112 Mon Sep 17 00:00:00 2001 From: John Mitchell <jlmitch5@ncsu.edu> Date: Mon, 4 Jan 2016 20:02:01 -0500 Subject: [PATCH 0283/1113] udpate copyright date --- docsite/_themes/srtd/footer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index b70cfde7ad8..30b02a8978b 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -13,7 +13,7 @@ <hr/> <p> - © Copyright 2015 <a href="http://ansible.com">Ansible, Inc.</a>. + © Copyright 2016 <a href="http://ansible.com">Ansible, Inc.</a>. {%- if last_updated %} {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} From 559ba467c09b112ecd7dc8681888b6631fcacba3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 11:11:50 -0800 Subject: [PATCH 0284/1113] Revert "Convert to bytes later so that make_become_command can jsut operate on text type." This reverts commit c4da5840b5e38aea1740e68f7100256c93dfbb17. Going to do this in the connection plugins --- lib/ansible/plugins/action/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 5383f8afd43..e54898b6db3 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,6 +488,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' + # We may need to revisit this later. 
+ cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -504,7 +506,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From 1ed3a018eb27dd06b08dbad57a162c2865abb635 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 11:12:14 -0800 Subject: [PATCH 0285/1113] Revert "Fix make tests-py3 on devel. Fix for https://github.com/ansible/ansible/issues/13638." This reverts commit e70061334aa99bee466295980f4cd4146096dc29. Going to do this in the connection plugins --- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index dcd04375959..0e47b6a5381 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ class TestActionBase(unittest.TestCase): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value=b'CMD') + play_context.make_become_cmd = Mock(return_value='CMD') - action_base._low_level_execute_command(b'ECHO', sudoable=True) + action_base._low_level_execute_command('ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command(b'ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) + action_base._low_level_execute_command('ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ class TestActionBase(unittest.TestCase): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) + action_base._low_level_execute_command('ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 8d57ffd16bd1025f7b04127fec760c13aca6d6dd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Dec 2015 11:12:41 -0800 Subject: [PATCH 0286/1113] Revert "Transform the command we pass to subprocess into a byte string in _low_level-exec_command" This reverts commit 0c013f592a31c06baac7aadf27d23598f6abe931. Going to do this in the connection plugin --- lib/ansible/plugins/action/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e54898b6db3..3f4fff588e9 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,8 +488,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): verbatim, then this won't work. 
May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') + if executable is not None: cmd = executable + ' -c ' + cmd From 9e32099b5e0535c2daf656e9d619e9a2efe9d3b6 Mon Sep 17 00:00:00 2001 From: Bruno Almeida do Lago <teolupus.ext@gmail.com> Date: Tue, 5 Jan 2016 16:48:49 +1300 Subject: [PATCH 0287/1113] Added OpenStack dynamic inventory example Added an example illustrating how to use the OpenStack dynamic inventory script to the "Dynamic Inventory" section. --- docsite/rst/intro_dynamic_inventory.rst | 71 +++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 5f491ebc2ef..85feaa143bd 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -206,6 +206,77 @@ explicitly clear the cache, you can run the ec2.py script with the ``--refresh-c # ./ec2.py --refresh-cache +.. _openstack_example: + +Example: OpenStack External Inventory Script +```````````````````````````````````````````` + +If you use an OpenStack based cloud, instead of manually maintaining your own inventory file, you can use the openstack.py dynamic inventory to pull information about your compute instances directly from OpenStack. + +You can download the latest version of the OpenStack inventory script at: https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + +You can use the inventory script explicitly (by passing the `-i openstack.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`). + +Explicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script and make it executable:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + +Source an OpenStack RC file:: + + source openstack.rc + +.. note:: + + An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to http://docs.openstack.org/cli-reference/content/cli_openrc.html. + +You can confirm the file has been successfully sourced by running a simple command, such as `nova list` and ensuring it return no errors. + +.. note:: + + The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to http://docs.openstack.org/cli-reference/content/install_clients.html. + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + ./openstack.py --list + +After a few moments you should see some JSON output with information about your compute instances. 
+ +Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack.py` script as an inventory file, as illustrated below:: + +ansible -i openstack.py all -m ping + +Implicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + sudo cp openstack.py /etc/ansible/hosts + +Download the sample configuration file, modify it to suit your needs and copy it to /etc/ansible/openstack.yml + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.yml + vi openstack.yml + sudo cp openstack.yml /etc/ansible/ + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + /etc/ansible/hosts --list + +After a few moments you should see some JSON output with information about your compute instances. + +Refresh the cache ++++++++++++++++++ + +Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack.py (or hosts) script with the --refresh parameter: + + ./openstack.py --refresh + .. _other_inventory_scripts: Other inventory scripts From c0a8cd950b909983cdc763f80495595d68597089 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 4 Jan 2016 19:23:12 -0800 Subject: [PATCH 0288/1113] Fix problems with non-ascii values passed as part of the command to connection plugins @drybjed discovered this with non-ascii environment variables and command line arguments to script and raw module. 
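The underlying failure is that passing text strings containing non-ASCII characters straight to subprocess can trigger an implicit ASCII encode, depending on interpreter and locale; the recurring fix in the diff that follows is to encode each element of the command line to bytes first. The snippet below is a stripped-down illustration of that pattern, with `to_bytes` stubbed in rather than imported from `ansible.utils.unicode`::

    # -*- coding: utf-8 -*-
    # Minimal illustration of the pattern applied below: encode every part of
    # the command line to UTF-8 bytes before handing it to subprocess, so that
    # non-ASCII arguments survive regardless of the active locale.
    import subprocess

    def to_bytes(obj, encoding='utf-8'):
        # stand-in for ansible.utils.unicode.to_bytes
        return obj if isinstance(obj, bytes) else obj.encode(encoding)

    local_cmd = ['/bin/echo', u'Zażółć gęślą jaźń']
    local_cmd = [to_bytes(c) for c in local_cmd]  # same effect as map(to_bytes, local_cmd)

    p = subprocess.Popen(local_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()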
--- lib/ansible/plugins/connection/__init__.py | 1 + lib/ansible/plugins/connection/chroot.py | 2 + lib/ansible/plugins/connection/docker.py | 7 ++- lib/ansible/plugins/connection/jail.py | 6 ++- lib/ansible/plugins/connection/libvirt_lxc.py | 6 ++- lib/ansible/plugins/connection/local.py | 11 ++++- lib/ansible/plugins/connection/ssh.py | 17 +++++-- lib/ansible/plugins/connection/zone.py | 8 ++-- test/integration/unicode-test-script | 7 +++ test/integration/unicode.yml | 45 +++++++++++++++++++ 10 files changed, 97 insertions(+), 13 deletions(-) create mode 100755 test/integration/unicode-test-script diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4ca..ff00bc02380 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -91,6 +91,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): @property def connected(self): + '''Read-only property holding whether the connection to the remote host is active or closed.''' return self._connected def _become_method_supported(self): diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index c86ea1fc355..ba41ffb5d88 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -30,6 +30,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase from ansible.module_utils.basic import is_executable +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -90,6 +91,7 @@ class Connection(ConnectionBase): local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd), host=self.chroot) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 4e08f56a095..ce556a1431b 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -36,6 +36,7 @@ from distutils.version import LooseVersion import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -125,7 +126,8 @@ class Connection(ConnectionBase): # -i is needed to keep stdin open which allows pipelining to work local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr) + display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -159,6 +161,7 @@ class Connection(ConnectionBase): if self.can_copy_bothways: # only docker >= 1.8.1 can do this natively args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ] + args = map(to_bytes, args) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: @@ -170,6 +173,7 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' args = [self.docker_cmd, "exec", "-i", 
self._play_context.remote_addr, executable, "-c", "dd of={0} bs={1}".format(out_path, BUFSIZE)] + args = map(to_bytes, args) with open(in_path, 'rb') as in_file: try: p = subprocess.Popen(args, stdin=in_file, @@ -192,6 +196,7 @@ class Connection(ConnectionBase): out_dir = os.path.dirname(out_path) args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] + args = map(to_bytes, args) p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index e665692543a..8f88b6ad28f 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -30,6 +30,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -83,7 +84,7 @@ class Connection(ConnectionBase): return stdout.split() def get_jail_path(self): - p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'], + p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -109,7 +110,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.jail) + display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index dc82d984040..3bfff8b1c35 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -30,6 +30,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -65,7 +66,7 @@ class Connection(ConnectionBase): return cmd def _check_domain(self, domain): - p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain], + p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode: @@ -89,7 +90,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.lxc) + display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index e69281d0f3b..29b1e9a5ca2 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -25,10 +25,13 @@ import select import fcntl import getpass +from ansible.compat.six import text_type, binary_type + import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from 
ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -69,9 +72,15 @@ class Connection(ConnectionBase): raise AnsibleError("Internal Error: this module does not support optimized module pipelining") executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None - display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) + display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook display.debug("opening command with Popen()") + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + p = subprocess.Popen( cmd, shell=isinstance(cmd, basestring), diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index a2abcf20aee..074f6aaa8ae 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -33,6 +33,7 @@ from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNo from ansible.plugins.connection import ConnectionBase from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode +from ansible.compat.six import text_type, binary_type try: from __main__ import display @@ -320,7 +321,7 @@ class Connection(ConnectionBase): ''' display_cmd = map(pipes.quote, cmd) - display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) + display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host) # Start the given command. If we don't need to pipeline data, we can try # to use a pseudo-tty (ssh will have been invoked with -tt). If we are @@ -328,6 +329,12 @@ class Connection(ConnectionBase): # old pipes. p = None + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + if not in_data: try: # Make sure stdin is a proper pty to avoid tcgetattr errors @@ -365,7 +372,7 @@ class Connection(ConnectionBase): # only when using ssh. Otherwise we can send initial data straightaway. state = states.index('ready_to_send') - if 'ssh' in cmd: + if b'ssh' in cmd: if self._play_context.prompt: # We're requesting escalation with a password, so we have to # wait for a password prompt. @@ -538,7 +545,7 @@ class Connection(ConnectionBase): stdin.close() if C.HOST_KEY_CHECKING: - if cmd[0] == "sshpass" and p.returncode == 6: + if cmd[0] == b"sshpass" and p.returncode == 6: raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. 
Please add this host\'s fingerprint to your known_hosts file to manage this host.') controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr @@ -600,7 +607,7 @@ class Connection(ConnectionBase): raise AnsibleConnectionFailure("Failed to connect to the host via ssh.") except (AnsibleConnectionFailure, Exception) as e: if attempt == remaining_tries - 1: - raise e + raise else: pause = 2 ** attempt - 1 if pause > 30: @@ -674,6 +681,8 @@ class Connection(ConnectionBase): # temporarily disabled as we are forced to currently close connections after every task because of winrm # if self._connected and self._persistent: # cmd = self._build_command('ssh', '-O', 'stop', self.host) + # + # cmd = map(to_bytes, cmd) # p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # stdout, stderr = p.communicate() diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py index 75d7db545d6..b65c80b73fb 100644 --- a/lib/ansible/plugins/connection/zone.py +++ b/lib/ansible/plugins/connection/zone.py @@ -31,6 +31,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils import to_bytes try: from __main__ import display @@ -56,8 +57,8 @@ class Connection(ConnectionBase): if os.geteuid() != 0: raise AnsibleError("zone connection requires running as root") - self.zoneadm_cmd = self._search_executable('zoneadm') - self.zlogin_cmd = self._search_executable('zlogin') + self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm')) + self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): raise AnsibleError("incorrect zone name %s" % self.zone) @@ -86,7 +87,7 @@ class Connection(ConnectionBase): def get_zone_path(self): #solaris10vm# zoneadm -z cswbuild list -p #-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared - process = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'], + process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -113,6 +114,7 @@ class Connection(ConnectionBase): # this through /bin/sh -c here. Instead it goes through the shell # that zlogin selects. 
local_cmd = [self.zlogin_cmd, self.zone, cmd] + local_cmd = map(to_bytes, local_cmd) display.vvv("EXEC %s" % (local_cmd), host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, diff --git a/test/integration/unicode-test-script b/test/integration/unicode-test-script new file mode 100755 index 00000000000..340f2a9f5b2 --- /dev/null +++ b/test/integration/unicode-test-script @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Non-ascii arguments:" +echo $@ + +echo "Non-ascii Env var:" +echo $option diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 6e8e073a79d..f38bf8f5e86 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -49,6 +49,51 @@ that: - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines" + - name: Run raw with non-ascii options + raw: "/bin/echo Zażółć gęślą jaźń" + register: results + + - name: Check that raw output the right thing + assert: + that: + - "'Zażółć gęślą jaźń' in results.stdout_lines" + + - name: Run a script with non-ascii options and environment + script: unicode-test-script --option "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that script output includes the nonascii arguments and environment values + assert: + that: + - "'--option Zażółć gęślą jaźń' in results.stdout_lines" + - "'Zażółć' in results.stdout_lines" + + - name: Ping with non-ascii environment variable and option + ping: + data: "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that ping with non-ascii data was correct + assert: + that: + - "'Zażółć gęślą jaźń' == results.ping" + + - name: Command that echos a non-ascii env var + command: "echo $option" + environment: + option: Zażółć + register: results + + - name: Check that a non-ascii env var was passed to the command module + assert: + that: + - "'Zażółć' in results.stdout_lines" + + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From 6470f7de2cf4cfc37fa5fef66c7e37514b6139d3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 5 Jan 2016 07:53:22 -0800 Subject: [PATCH 0289/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 002028748f0..33014c6db1c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 002028748f080961ade801c30e194bfd4ba043ce +Subproject commit 33014c6db1ce757d0ffa065e6c9924ac4db1cacc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f6a7b6dd1f7..82a4cf84be8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f +Subproject commit 82a4cf84be82244d0cf7d043c8cbb4f176f086db From 11ce08b9dde32c7e4b51a6fffc22f301c81181be Mon Sep 17 00:00:00 2001 From: Eric Feliksik <e.feliksik@nerdalize.com> Date: Tue, 5 Jan 2016 18:04:38 +0100 Subject: [PATCH 0290/1113] cleaner implementation and random chunk length. 
--- lib/ansible/parsing/vault/__init__.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index bcd038c8b8d..1d4eeef4653 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -22,6 +22,7 @@ import shlex import shutil import sys import tempfile +import random from io import BytesIO from subprocess import call from ansible.errors import AnsibleError @@ -235,20 +236,21 @@ class VaultEditor: """ file_len = os.path.getsize(tmp_path) + max_chunk_len = min(1024*1024*2, file_len) passes = 3 with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) - # get a random chunk of data - data = os.urandom(min(1024*1024*2, file_len)) - bytes_todo = file_len - while bytes_todo > 0: - chunk = data[:bytes_todo] - fh.write(chunk) - bytes_todo -= len(chunk) - - assert(fh.tell() == file_len) + # get a random chunk of data, each pass with other length + chunk_len = random.randint(max_chunk_len/2, max_chunk_len) + data = os.urandom(chunk_len) + + for _ in range(0, file_len // chunk_len): + fh.write(data) + fh.write(data[:file_len % chunk_len]) + + assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy os.fsync(fh) @@ -273,13 +275,12 @@ class VaultEditor: r = call(['shred', tmp_path]) except OSError as e: # shred is not available on this system, or some other error occured. - self._shred_file_custom(tmp_path) - r = 0 + r = 1 if r != 0: # we could not successfully execute unix shred; therefore, do custom shred. self._shred_file_custom(tmp_path) - + os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 9972c27a9bc1dd2c9051368e082e2b366a04acbe Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 4 Jan 2016 18:44:09 -0500 Subject: [PATCH 0291/1113] now handles 'non file diffs' this allows modules to pass back a 'diff' dict and it will still show using the file interface --- lib/ansible/plugins/callback/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index cc2a9ad0e75..faf04b1180f 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -116,6 +116,10 @@ class CallbackBase: if 'src_larger' in diff: ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: + # format complex structures into 'files' + for x in ['before', 'after']: + if isinstance(diff[x], dict): + diff[x] = json.dumps(diff[x], sort_keys=True, indent=4) if 'before_header' in diff: before_header = "before: %s" % diff['before_header'] else: From f3c45adfb8670701d0b19e86787a5213bb5afb5f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 4 Jan 2016 19:58:06 -0500 Subject: [PATCH 0292/1113] simplified diff handling in callback no need for the copy or other complexity --- lib/ansible/plugins/callback/default.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba5..276ac435f4b 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,11 +137,8 @@ class CallbackModule(CallbackBase): def v2_on_file_diff(self, result): if result._task.loop and 'results' in 
result._result: for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = self._get_item(res) - newres._result = res - - self.v2_on_file_diff(newres) + if 'diff' in res: + self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From a65543bbafbd328e7848a99d2a570f71c43a53a0 Mon Sep 17 00:00:00 2001 From: Charles Paul <cpaul@ansible.com> Date: Tue, 5 Jan 2016 14:52:06 -0600 Subject: [PATCH 0293/1113] adding password no_log and cleaning up argument spec --- lib/ansible/module_utils/vca.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index ef89d545569..9737cca8b47 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -35,8 +35,8 @@ class VcaError(Exception): def vca_argument_spec(): return dict( - username=dict(), - password=dict(), + username=dict(type='str', aliases=['user'], required=True), + password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True), org=dict(), service_id=dict(), instance_id=dict(), From dc47c25e589f1c2b1f44867076624f0e0564b7c6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 5 Jan 2016 22:01:01 -0500 Subject: [PATCH 0294/1113] Minor tweak to ensure diff is not empty in callback for file diffs --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 6ca728e65f8..dfad6579343 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,7 +137,7 @@ class CallbackModule(CallbackBase): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - if 'diff' in res: + if 'diff' in res and res['diff']: self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From 7c8374e0f8e153368bb6a22caf7b7ada07f8d797 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Wed, 6 Jan 2016 20:44:19 +0530 Subject: [PATCH 0295/1113] Strip string terms before templating The earlier code did call terms.strip(), but ignored the return value instead of passing that in to templar.template(). Clearly an oversight. 
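A tiny self-contained illustration of the bug class described above, using a stand-in template() function (the real templar is not needed to show the point): str.strip() returns a new string rather than modifying in place, so its result has to be passed on.

    def template(value):
        # stand-in for templar.template(); just echoes its input
        return value

    terms = "  {{ my_list }} \n"

    stripped = terms.strip()              # the old code computed this ...
    old_result = template(terms)          # ... but templated the unstripped value anyway
    new_result = template(terms.strip())  # the fix: pass the stripped value in

    assert old_result != new_result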
--- lib/ansible/utils/listify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index 7fe83a8fa0c..d834737ab58 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -31,9 +31,8 @@ __all__ = ['listify_lookup_plugin_terms'] def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False, convert_bare=True): if isinstance(terms, string_types): - stripped = terms.strip() # TODO: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override - terms = templar.template(terms, convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) + terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) else: terms = templar.template(terms, fail_on_undefined=fail_on_undefined) From 11b55be5bbb90b2bc917b2637d6fcdbe1a15092d Mon Sep 17 00:00:00 2001 From: muffl0n <sven@schliesing.de> Date: Thu, 20 Aug 2015 10:31:48 +0200 Subject: [PATCH 0296/1113] Show version without supplying a dummy action fixes #12004 parsing x2 does not seem to break anything --- lib/ansible/cli/galaxy.py | 7 +++++-- lib/ansible/cli/vault.py | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 476a7d0f897..a022d17859c 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -50,7 +50,7 @@ class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") - + def __init__(self, args): self.api = None self.galaxy = None @@ -64,6 +64,9 @@ class GalaxyCLI(CLI): epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to actions @@ -141,7 +144,7 @@ class GalaxyCLI(CLI): return True def run(self): - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 9908f17e578..50a6fdebdc8 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -53,6 +53,9 @@ class VaultCLI(CLI): epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to self.actions From ab2f47327a82148441140c9b98a02a6e28877153 Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Wed, 6 Jan 2016 13:59:25 -0500 Subject: [PATCH 0297/1113] removed the "wy-side-nav-search" element this is so we can use the new swiftype search and it's search input --- docsite/_themes/srtd/layout.html | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 41b6b75c1d2..a10b7656aab 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -150,11 +150,6 @@ </a> </div> - <div class="wy-side-nav-search" style="background-color:#5bbdbf;height=80px;margin:'auto auto auto auto'"> - <!-- <a href="{{ pathto(master_doc) }}" class="icon icon-home"> {{ project }}</a> --> - {% include "searchbox.html" %} 
- </div> - <div id="menu-id" class="wy-menu wy-menu-vertical" data-spy="affix"> {% set toctree = toctree(maxdepth=2, collapse=False) %} {% if toctree %} From 9ac9c75d7600d9d4588f435821add1c4c24c6268 Mon Sep 17 00:00:00 2001 From: Tomasz Kontusz <tomasz.kontusz@gmail.com> Date: Tue, 5 Jan 2016 20:19:47 +0100 Subject: [PATCH 0298/1113] linear strategy: don't look at tasks from the next block --- lib/ansible/plugins/strategy/linear.py | 31 ++++++++++++++++++-------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index bfa2c37ce43..04abe0eb8f0 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -63,19 +63,32 @@ class StrategyModule(StrategyBase): num_rescue = 0 num_always = 0 - lowest_cur_block = len(iterator._blocks) - display.debug("counting tasks in each state of execution") - for (k, v) in iteritems(host_tasks): - if v is None: - continue + host_tasks_to_run = [(host, state_task) + for host, state_task in iteritems(host_tasks) + if state_task and state_task[1]] + # Drop noops + host_tasks_to_run = [ + (host, (state, task)) + for host, (state, task) in host_tasks_to_run + if task.action != 'meta' or task.args.get('_raw_params') != 'noop' + ] + if host_tasks_to_run: + lowest_cur_block = min( + (s.cur_block for h, (s, t) in host_tasks_to_run + if s.run_state != PlayIterator.ITERATING_COMPLETE)) + else: + # empty host_tasks_to_run will just run till the end of the function + # without ever touching lowest_cur_block + lowest_cur_block = None + + for (k, v) in host_tasks_to_run: (s, t) = v - if t is None: - continue - if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE: - lowest_cur_block = s.cur_block + if s.cur_block > lowest_cur_block: + # Not the current block, ignore it + continue if s.run_state == PlayIterator.ITERATING_SETUP: num_setups += 1 From 90cb7e193738d4a7a1d53daa9e7a98e75d0b0301 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 6 Jan 2016 09:58:20 -0500 Subject: [PATCH 0299/1113] Don't drop noops from task counting code in linear strategy --- lib/ansible/plugins/strategy/linear.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 04abe0eb8f0..f441b88fe3d 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -67,12 +67,6 @@ class StrategyModule(StrategyBase): host_tasks_to_run = [(host, state_task) for host, state_task in iteritems(host_tasks) if state_task and state_task[1]] - # Drop noops - host_tasks_to_run = [ - (host, (state, task)) - for host, (state, task) in host_tasks_to_run - if task.action != 'meta' or task.args.get('_raw_params') != 'noop' - ] if host_tasks_to_run: lowest_cur_block = min( From 46903c80faaf2a73056e7b9fbd52085291b4931f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 6 Jan 2016 15:18:22 -0800 Subject: [PATCH 0300/1113] More fixes for unicode handling in the connection plugins. Tested that ssh, docker, local, lxc-libvirt, chroot all work with the updated unicode integration test. 
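The connection-plugin changes below all apply the same byte/text discipline: keep strings as unicode inside the plugins (u"" display strings), convert to bytes only at OS boundaries such as argv lists and pipes, and convert to a safe native string when formatting error messages. A rough standalone sketch of that pattern, with a simplified to_bytes() stand-in rather than the real ansible.utils.unicode helpers:

    # -*- coding: utf-8 -*-
    import subprocess

    def to_bytes(obj, encoding='utf-8'):
        # simplified stand-in for ansible.utils.unicode.to_bytes
        return obj.encode(encoding) if isinstance(obj, type(u'')) else obj

    def run(cmd_parts):
        # encode every argv element before handing it to the OS
        argv = [to_bytes(c) for c in cmd_parts]
        p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        # decode captured output back to text before displaying or comparing it
        return p.returncode, out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace')

    rc, out, err = run([u'/bin/echo', u'Zażółć gęślą jaźń'])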
--- lib/ansible/inventory/script.py | 5 ++-- lib/ansible/plugins/connection/docker.py | 4 +-- lib/ansible/plugins/connection/local.py | 14 +++++------ lib/ansible/plugins/connection/ssh.py | 32 +++++++++++++----------- lib/ansible/plugins/connection/winrm.py | 2 +- lib/ansible/plugins/shell/sh.py | 4 +-- test/integration/unicode.yml | 30 ++++++++++++++++++++++ 7 files changed, 62 insertions(+), 29 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 6dfb1d2af08..cdfe676bcd2 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -31,6 +31,7 @@ from ansible.errors import AnsibleError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode +from ansible.utils.unicode import to_str class InventoryScript: @@ -72,11 +73,11 @@ class InventoryScript: self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e))) + raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename_, to_str(e))) if not isinstance(self.raw, Mapping): sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename)) + raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename))) self.raw = json_dict_bytes_to_unicode(self.raw) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index ce556a1431b..130317f24aa 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -113,7 +113,7 @@ class Connection(ConnectionBase): """ Connect to the container. 
Nothing to do """ super(Connection, self)._connect() if not self._connected: - display.vvv("ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( + display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( self._play_context.remote_user, host=self._play_context.remote_addr) ) self._connected = True @@ -172,7 +172,7 @@ class Connection(ConnectionBase): # running containers, so we use docker exec to implement this executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c", - "dd of={0} bs={1}".format(out_path, BUFSIZE)] + "dd of=%s bs=%s" % (out_path, BUFSIZE)] args = map(to_bytes, args) with open(in_path, 'rb') as in_file: try: diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index 29b1e9a5ca2..5004c3698db 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -31,7 +31,7 @@ import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase -from ansible.utils.unicode import to_bytes +from ansible.utils.unicode import to_bytes, to_str try: from __main__ import display @@ -57,7 +57,7 @@ class Connection(ConnectionBase): self._play_context.remote_user = getpass.getuser() if not self._connected: - display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr)) + display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr)) self._connected = True return self @@ -126,22 +126,22 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) - display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path)) + display.vvv(u"{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path)) if not os.path.exists(in_path): - raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) + raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path))) try: shutil.copyfile(in_path, out_path) except shutil.Error: - raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path)) + raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_str(in_path), to_str(out_path))) except IOError as e: - raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e)) + raise AnsibleError("failed to transfer file to {0}: {1}".format(to_str(out_path), to_str(e))) def fetch_file(self, in_path, out_path): ''' fetch a file from local to local -- for copatibility ''' super(Connection, self).fetch_file(in_path, out_path) - display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path)) + display.vvv(u"{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path)) self.put_file(in_path, out_path) def close(self): diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 074f6aaa8ae..0a0b2bb04bc 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -32,7 +32,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase from ansible.utils.path import unfrackpath, makedirs_safe -from ansible.utils.unicode 
import to_bytes, to_unicode +from ansible.utils.unicode import to_bytes, to_unicode, to_str from ansible.compat.six import text_type, binary_type try: @@ -197,7 +197,7 @@ class Connection(ConnectionBase): if user: self._add_args( "ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set", - ("-o", "User={0}".format(self._play_context.remote_user)) + ("-o", "User={0}".format(to_bytes(self._play_context.remote_user))) ) self._add_args( @@ -231,7 +231,7 @@ class Connection(ConnectionBase): raise AnsibleError("Cannot write to ControlPath %s" % cpdir) args = ("-o", "ControlPath={0}".format( - C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir)) + to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir))) ) self._add_args("found only ControlPersist; added ControlPath", args) @@ -320,7 +320,7 @@ class Connection(ConnectionBase): Starts the command and communicates with it until it ends. ''' - display_cmd = map(pipes.quote, cmd) + display_cmd = map(to_unicode, map(pipes.quote, cmd)) display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host) # Start the given command. If we don't need to pipeline data, we can try @@ -354,7 +354,7 @@ class Connection(ConnectionBase): if self._play_context.password: os.close(self.sshpass_pipe[0]) - os.write(self.sshpass_pipe[1], "{0}\n".format(self._play_context.password)) + os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password))) os.close(self.sshpass_pipe[1]) ## SSH state machine @@ -562,7 +562,7 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) + display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) # we can only use tty when we are not pipelining the modules. piping # data into /usr/bin/python inside a tty automatically invokes the @@ -630,44 +630,46 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host) + display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host) if not os.path.exists(in_path): - raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) + raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path))) # scp and sftp require square brackets for IPv6 addresses, but # accept them for hostnames and IPv4 addresses too. 
host = '[%s]' % self.host if C.DEFAULT_SCP_IF_SSH: - cmd = self._build_command('scp', in_path, '{0}:{1}'.format(host, pipes.quote(out_path))) + cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, pipes.quote(out_path))) in_data = None else: - cmd = self._build_command('sftp', host) - in_data = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) + cmd = self._build_command('sftp', to_bytes(host)) + in_data = u"put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) + in_data = to_bytes(in_data, nonstring='passthru') (returncode, stdout, stderr) = self._run(cmd, in_data) if returncode != 0: - raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr)) + raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(to_str(out_path), to_str(stdout), to_str(stderr))) def fetch_file(self, in_path, out_path): ''' fetch a file from remote to local ''' super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host) + display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host) # scp and sftp require square brackets for IPv6 addresses, but # accept them for hostnames and IPv4 addresses too. host = '[%s]' % self.host if C.DEFAULT_SCP_IF_SSH: - cmd = self._build_command('scp', '{0}:{1}'.format(host, pipes.quote(in_path)), out_path) + cmd = self._build_command('scp', u'{0}:{1}'.format(host, pipes.quote(in_path)), out_path) in_data = None else: cmd = self._build_command('sftp', host) - in_data = "get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) + in_data = u"get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) + in_data = to_bytes(in_data, nonstring='passthru') (returncode, stdout, stderr) = self._run(cmd, in_data) if returncode != 0: diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index ef863529b97..622231675d0 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -318,7 +318,7 @@ class Connection(ConnectionBase): local_sha1 = secure_hash(in_path) if not remote_sha1 == local_sha1: - raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(remote_sha1, local_sha1)) + raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_str(remote_sha1), to_str(local_sha1))) def fetch_file(self, in_path, out_path): diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 7fbfa819ef1..8b20338a603 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -136,8 +136,8 @@ class ShellModule(object): shell_escaped_path = pipes.quote(path) test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) csums = [ - "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) - "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = 
open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 + u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) + u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 ] cmd = (" %s " % self._SHELL_OR).join(csums) diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index f38bf8f5e86..74d5772264c 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -93,6 +93,36 @@ that: - "'Zażółć' in results.stdout_lines" + - name: Clean a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: absent + + - name: Create a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: directory + + - name: Create a file with a non-ascii filename + file: + path: /var/tmp/ansible_test_unicode_get_put/Zażółć + state: touch + delegate_to: localhost + + - name: Put with unicode filename + copy: + src: /var/tmp/ansible_test_unicode_get_put/Zażółć + dest: /var/tmp/ansible_test_unicode_get_put/Zażółć2 + + - name: Fetch with unicode filename + fetch: + src: /var/tmp/ansible_test_unicode_get_put/Zażółć2 + dest: /var/tmp/ansible_test_unicode_get_put/ + + - name: Clean a temp directory + file: + path: /var/tmp/ansible_test_unicode_get_put + state: absent - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' From d26d76ea5844202d11fe2c04303ab8889b638a66 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 6 Jan 2016 15:46:42 -0800 Subject: [PATCH 0301/1113] Fix typo --- lib/ansible/inventory/script.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index cdfe676bcd2..1fa49e50537 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -63,7 +63,6 @@ class InventoryScript: self.host_vars_from_top = None self._parse(stderr) - def _parse(self, err): all_hosts = {} @@ -73,7 +72,7 @@ class InventoryScript: self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename_, to_str(e))) + raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename), to_str(e))) if not isinstance(self.raw, Mapping): sys.stderr.write(err + "\n") @@ -113,7 +112,7 @@ class InventoryScript: "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: - if not hostname in all_hosts: + if hostname not in all_hosts: all_hosts[hostname] = Host(hostname) host = all_hosts[hostname] group.add_host(host) @@ -149,7 +148,6 @@ class InventoryScript: got = 
self.host_vars_from_top.get(host.name, {}) return got - cmd = [self.filename, "--host", host.name] try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -162,4 +160,3 @@ class InventoryScript: return json_dict_bytes_to_unicode(self._loader.load(out)) except ValueError: raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) - From b7dcd7a3a03bd132fabd6eef956ef811923fa8a1 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 7 Jan 2016 01:37:19 -0500 Subject: [PATCH 0302/1113] pass diff and verbosity settings to modules also simplifies and guarantees that all flags are always passed, even when false this should make checks simpler as you always expect them to exist --- lib/ansible/plugins/action/__init__.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 3f4fff588e9..d37631b16e1 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -371,18 +371,24 @@ class ActionBase(with_metaclass(ABCMeta, object)): module_args = self._task.args # set check mode in the module arguments, if required - if self._play_context.check_mode and not self._task.always_run: + if self._play_context.check_mode: if not self._supports_check_mode: raise AnsibleError("check mode is not supported for this operation") module_args['_ansible_check_mode'] = True + else: + module_args['_ansible_check_mode'] = False # set no log in the module arguments, if required - if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: - module_args['_ansible_no_log'] = True + module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG # set debug in the module arguments, if required - if C.DEFAULT_DEBUG: - module_args['_ansible_debug'] = True + module_args['_ansible_debug'] = C.DEFAULT_DEBUG + + # let module know we are in diff mode + module_args['_ansible_diff'] = self._play_context.diff + + # let module know our verbosity + module_args['_ansible_verbosity'] = self._display.verbosity (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars) if not shebang: From 519952d7d71341c8acf2351124c2459e56279367 Mon Sep 17 00:00:00 2001 From: Michael <miketwo@saucelabs.com> Date: Thu, 7 Jan 2016 00:08:49 -0800 Subject: [PATCH 0303/1113] Fix module name --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 5d664d56313..f945cc24bca 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -191,7 +191,7 @@ a lot shorter than this:: Let's test that module:: - ansible/hacking/test-module -m ./time -a "time=\"March 14 12:23\"" + ansible/hacking/test-module -m ./timetest.py -a "time=\"March 14 12:23\"" This should return something like:: From eb4ab5fa2aa503ede5f9bf20cd4f5a190c95f777 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 7 Jan 2016 08:27:13 -0500 Subject: [PATCH 0304/1113] Revert "Show version without supplying a dummy action" This reverts commit 11b55be5bbb90b2bc917b2637d6fcdbe1a15092d. 
Parsing before action will fail if one of the action specific options is used As per issue #13743 --- lib/ansible/cli/galaxy.py | 7 ++----- lib/ansible/cli/vault.py | 3 --- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index a022d17859c..476a7d0f897 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -50,7 +50,7 @@ class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") - + def __init__(self, args): self.api = None self.galaxy = None @@ -64,9 +64,6 @@ class GalaxyCLI(CLI): epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - # Workaround for #12004: show version without supplying a dummy action - self.parser.parse_args() - self.set_action() # options specific to actions @@ -144,7 +141,7 @@ class GalaxyCLI(CLI): return True def run(self): - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 50a6fdebdc8..9908f17e578 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -53,9 +53,6 @@ class VaultCLI(CLI): epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - # Workaround for #12004: show version without supplying a dummy action - self.parser.parse_args() - self.set_action() # options specific to self.actions From 28be222ff065b0fcbcf2b4beded7ce4030309790 Mon Sep 17 00:00:00 2001 From: Sebastian Thiel <sthiel@thoughtworks.com> Date: Thu, 7 Jan 2016 16:55:41 +0100 Subject: [PATCH 0305/1113] correct invocation of launchctl When I executed `launchctl limit maxfiles 1024 2048` , my entire system would become unusable, as all of the sudden no process could use any file anymore, reporting that the max file limit was reached. Only a hard reboot could fix the problem, which fortunately revealed that the configuration was not saved. The change I made *should* remedy the issue, even though I didn't test it. Therefore I hope you can revise the documentation to be sure nothing bad happens. # Meta Tested on OSX 10.11.2 --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index a5ed83a3027..99e2661226c 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -52,7 +52,7 @@ This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on. .. note:: As of 2.0 ansible uses a few more file handles to manage its forks, OS X has a very low setting so if you want to use 15 or more forks - you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 2048``. Or just any time you see a "Too many open files" error. + you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 unlimited``. Or just any time you see a "Too many open files" error. .. 
_managed_node_requirements: From 41a417be1f3b47ae4ef27778d175227f476849b1 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 7 Jan 2016 19:02:08 -0500 Subject: [PATCH 0306/1113] noted that regex_escape was added in 2.0 fixes #13759 --- docsite/rst/playbooks_filters.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index 0bbb5240458..c91b04b3675 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -547,7 +547,7 @@ To match strings against a regex, use the "match" or "search" filter:: To replace text in a string with regex, use the "regex_replace" filter:: - # convert "ansible" to "able" + # convert "ansible" to "able" {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }} # convert "foobar" to "bar" @@ -559,11 +559,13 @@ To replace text in a string with regex, use the "regex_replace" filter:: .. note:: Prior to ansible 2.0, if "regex_replace" filter was used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments), then you needed to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``). +.. versionadded:: 2.0 + To escape special characters within a regex, use the "regex_escape" filter:: # convert '^f.*o(.*)$' to '\^f\.\*o\(\.\*\)\$' {{ '^f.*o(.*)$' | regex_escape() }} - + To make use of one attribute from each item in a list of complex variables, use the "map" filter (see the `Jinja2 map() docs`_ for more):: # get a comma-separated list of the mount points (e.g. "/,/mnt/stuff") on a host From 45355cd5660da17350811d92a5144077910ea434 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Thu, 7 Jan 2016 16:23:55 -0800 Subject: [PATCH 0307/1113] convert winrm put_file script template to Unicode string literal Fixes traceback on homedirs with non-ascii chars --- lib/ansible/plugins/connection/winrm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index 622231675d0..dec48787408 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -271,7 +271,7 @@ class Connection(ConnectionBase): if not os.path.exists(in_path): raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path) - script_template = ''' + script_template = u''' begin {{ $path = "{0}" From b0fe70538406334f21708401558e4e2e4292658f Mon Sep 17 00:00:00 2001 From: Emil Lind <emil@sys.nu> Date: Fri, 8 Jan 2016 13:52:44 +0100 Subject: [PATCH 0308/1113] Allow InventoryScript JSON with childgroups only and without hosts and vars Without this patch, the simplified syntax is triggered when a group is defined like this: "platforms": { "children": [ "cloudstack" ] } Which results in a group 'platforms' with 1 host 'platforms'. 
more details in https://github.com/ansible/ansible/issues/13655 --- lib/ansible/inventory/script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 1fa49e50537..042fa8c24a2 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -103,7 +103,7 @@ class InventoryScript: if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars - elif not any(k in data for k in ('hosts','vars')): + elif not any(k in data for k in ('hosts','vars','children')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: From 1d240902c6d0876e1e134f75dcb898d121d01bfb Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 7 Jan 2016 23:54:31 -0500 Subject: [PATCH 0309/1113] adds shared module shell for creating cli based transports This commit add a new shared module shell that is used to build connections to network devices that operate in a CLI environment. This commit supercedes the issh.py and cli.py commits and removes them from module_utils. --- .../module_utils/{issh.py => shell.py} | 165 +++++++++--------- 1 file changed, 82 insertions(+), 83 deletions(-) rename lib/ansible/module_utils/{issh.py => shell.py} (56%) diff --git a/lib/ansible/module_utils/issh.py b/lib/ansible/module_utils/shell.py similarity index 56% rename from lib/ansible/module_utils/issh.py rename to lib/ansible/module_utils/shell.py index 00922ef8cdd..0107911ba02 100644 --- a/lib/ansible/module_utils/issh.py +++ b/lib/ansible/module_utils/shell.py @@ -16,57 +16,43 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -""" -Ansible shared module for building modules that require an interactive -SSH Shell such as those for command line driven devices. This module -provides a native SSH transport using paramiko and builds a base Shell -class for creating shell driven modules. - -In order to use this module, include it as part of a custom -module as shown below and create and subclass Shell. - -** Note: The order of the import statements does matter. ** - -from ansible.module_utils.basic import * -from ansible.module_utils.ssh import * - -This module provides the following common argument spec for creating -shell connections: - - * host (str) - [Required] The IPv4 address or FQDN of the device - - * port (int) - Overrides the default SSH port. - - * username (str) - [Required] The username to use to authenticate - the SSH session. 
- - * password (str) - [Required] The password to use to authenticate - the SSH session - - * connect_timeout (int) - Specifies the connection timeout in seconds - -""" import re import socket from StringIO import StringIO -import paramiko +try: + import paramiko + HAS_PARAMIKO = True +except ImportError: + HAS_PARAMIKO = False -def shell_argument_spec(spec=None): - """ Generates an argument spec for the Shell class - """ - arg_spec = dict( - host=dict(required=True), - port=dict(default=22, type='int'), - username=dict(required=True), - password=dict(required=True), - connect_timeout=dict(default=10, type='int'), - ) - if spec: - arg_spec.update(spec) - return arg_spec +ANSI_RE = re.compile(r'(\x1b\[\?1h\x1b=)') + +CLI_PROMPTS_RE = [ + re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#](?:\s*)$'), + re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$') +] + +CLI_ERRORS_RE = [ + re.compile(r"% ?Error"), + re.compile(r"^% \w+", re.M), + re.compile(r"% ?Bad secret"), + re.compile(r"invalid input", re.I), + re.compile(r"(?:incomplete|ambiguous) command", re.I), + re.compile(r"connection timed out", re.I), + re.compile(r"[^\r\n]+ not found", re.I), + re.compile(r"'[^']' +returned error code: ?\d+"), +] + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() class ShellError(Exception): @@ -75,7 +61,6 @@ class ShellError(Exception): self.message = msg self.command = command - class Command(object): def __init__(self, command, prompt=None, response=None): @@ -86,59 +71,47 @@ class Command(object): def __str__(self): return self.command -class Ssh(object): +class Shell(object): def __init__(self): - self.client = None + self.ssh = None + self.shell = None + + self.prompts = list() + self.prompts.extend(CLI_PROMPTS_RE) + + self.errors = list() + self.errors.extend(CLI_ERRORS_RE) def open(self, host, port=22, username=None, password=None, timeout=10, key_filename=None): - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh = paramiko.SSHClient() + self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) use_keys = password is None - ssh.connect(host, port=port, username=username, password=password, + self.ssh.connect(host, port=port, username=username, password=password, timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys, key_filename=key_filename) - self.client = ssh - return self.on_open() - - def on_open(self): - pass - - def close(self): - self.client.close() - return self.on_close() - - def on_close(self): - pass - - -class Shell(Ssh): - - def __init__(self): - super(Shell, self).__init__() - self.shell = None - - self.prompts = list() - self.errors = list() - - def on_open(self): - self.shell = self.client.invoke_shell() + self.shell = self.ssh.invoke_shell() self.shell.settimeout(10) self.receive() + def strip(self, data): + return ANSI_RE.sub('', data) + def receive(self, cmd=None): recv = StringIO() while True: - recv.write(self.shell.recv(200)) + data = self.shell.recv(200) + + recv.write(data) recv.seek(recv.tell() - 200) - window = recv.read() + window = self.strip(recv.read()) if isinstance(cmd, Command): self.handle_input(window, prompt=cmd.prompt, @@ -146,19 +119,25 @@ class Shell(Ssh): try: if self.read(window): - resp = recv.getvalue() + resp = self.strip(recv.getvalue()) return self.sanitize(cmd, resp) except ShellError, exc: exc.command = cmd raise - def send(self, command): + def send(self, commands): + responses = list() try: - 
cmd = '%s\r' % str(command) - self.shell.sendall(cmd) - return self.receive(command) + for command in to_list(commands): + cmd = '%s\r' % str(command) + self.shell.sendall(cmd) + responses.append(self.receive(command)) except socket.timeout, exc: raise ShellError("timeout trying to send command", cmd) + return responses + + def close(self): + self.shell.close() def handle_input(self, resp, prompt, response): if not prompt or not response: @@ -184,11 +163,31 @@ class Shell(Ssh): def read(self, response): for regex in self.errors: if regex.search(response): - raise ShellError('{}'.format(response)) + raise ShellError('%s' % response) for regex in self.prompts: if regex.search(response): return True +def get_cli_connection(module): + host = module.params['host'] + port = module.params['port'] + if not port: + port = 22 + username = module.params['username'] + password = module.params['password'] + + try: + cli = Cli() + cli.open(host, port=port, username=username, password=password) + except paramiko.ssh_exception.AuthenticationException, exc: + module.fail_json(msg=exc.message) + except socket.error, exc: + host = '%s:%s' % (host, port) + module.fail_json(msg=exc.strerror, errno=exc.errno, host=host) + except socket.timeout: + module.fail_json(msg='socket timed out') + + return cli From 737090dd139edbce0a7e90d4ba136bdc19dd6346 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 6 Jan 2016 10:29:35 -0500 Subject: [PATCH 0310/1113] now show full callback stacktrace when vvv+ Still is a warning as we don't want to repeat it multiple times nor additional callbacks to stop ansible execution. hopefully we can avoid shipping w/o exceptions in the default/minimal callbacks... Also added feature that now allows for 'preformated' strings passed to warning --- lib/ansible/executor/task_queue_manager.py | 7 ++++++- lib/ansible/utils/display.py | 13 +++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index dae70a12925..ab46d6f78b8 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -290,8 +290,13 @@ class TaskQueueManager: try: method(*args, **kwargs) except Exception as e: + import traceback + orig_tb = traceback.format_exc() try: v1_method = method.replace('v2_','') v1_method(*args, **kwargs) except Exception: - display.warning('Error when using %s: %s' % (method, str(e))) + if display.verbosity >= 3: + display.warning(orig_tb, formatted=True) + else: + display.warning('Error when using %s: %s' % (method, str(e))) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 8700a510186..ef1ac57b04a 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -202,10 +202,15 @@ class Display: self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True) self._deprecations[new_msg] = 1 - def warning(self, msg): - new_msg = "\n[WARNING]: %s" % msg - wrapped = textwrap.wrap(new_msg, self.columns) - new_msg = "\n".join(wrapped) + "\n" + def warning(self, msg, formatted=False): + + if not formatted: + new_msg = "\n[WARNING]: %s" % msg + wrapped = textwrap.wrap(new_msg, self.columns) + new_msg = "\n".join(wrapped) + "\n" + else: + new_msg = "\n[WARNING]: \n%s" % msg + if new_msg not in self._warns: self.display(new_msg, color=C.COLOR_WARN, stderr=True) self._warns[new_msg] = 1 From 1cc5ac06e70630f61423aa1576234c54faabf8cd Mon Sep 17 00:00:00 2001 From: Brian Coca 
<brian.coca+git@gmail.com> Date: Fri, 8 Jan 2016 11:43:27 -0500 Subject: [PATCH 0311/1113] restructure vars_prompt and fix regression pushed it to use the existing propmpt from display and moved the vars prompt code there also for uniformity changed vars_prompt to check extra vars vs the empty play.vars to restore 1.9 behaviour sipmlified the code as it didn't need to check for syntax again (tqm is made none prior based on that) fixes #13770 --- lib/ansible/executor/playbook_executor.py | 54 ++--------------------- lib/ansible/utils/display.py | 47 +++++++++++++++++++- 2 files changed, 49 insertions(+), 52 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 60a416af73d..eecaa66a62c 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -111,13 +111,12 @@ class PlaybookExecutor: salt_size = var.get("salt_size", None) salt = var.get("salt", None) - if vname not in play.vars: + if vname not in self._variable_manager.extra_vars: + self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) if self._tqm: - self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) - if self._options.syntax: + play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) + else: # we are either in --list-<option> or syntax check play.vars[vname] = default - else: - play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. 
@@ -237,48 +236,3 @@ class PlaybookExecutor: return serialized_batches - def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - - if sys.__stdin__.isatty(): - if prompt and default is not None: - msg = "%s [%s]: " % (prompt, default) - elif prompt: - msg = "%s: " % prompt - else: - msg = 'input for %s: ' % varname - - def do_prompt(prompt, private): - if sys.stdout.encoding: - msg = prompt.encode(sys.stdout.encoding) - else: - # when piping the output, or at other times when stdout - # may not be the standard file descriptor, the stdout - # encoding may not be set, so default to something sane - msg = prompt.encode(locale.getpreferredencoding()) - if private: - return getpass.getpass(msg) - return raw_input(msg) - - if confirm: - while True: - result = do_prompt(msg, private) - second = do_prompt("confirm " + msg, private) - if result == second: - break - display.display("***** VALUES ENTERED DO NOT MATCH ****") - else: - result = do_prompt(msg, private) - else: - result = None - display.warning("Not prompting as we are not in interactive mode") - - # if result is false and default is not None - if not result and default is not None: - result = default - - if encrypt: - result = do_encrypt(result, encrypt, salt_size, salt) - - # handle utf-8 chars - result = to_unicode(result, errors='strict') - return result diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index ef1ac57b04a..ef5a4bc6872 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -267,13 +267,56 @@ class Display: self._errors[new_msg] = 1 @staticmethod - def prompt(msg): + def prompt(msg, private=False): prompt_string = to_bytes(msg, encoding=Display._output_encoding()) if sys.version_info >= (3,): # Convert back into text on python3. 
We do this double conversion # to get rid of characters that are illegal in the user's locale prompt_string = to_unicode(prompt_string) - return input(prompt_string) + + if private: + return getpass.getpass(msg) + else: + return input(prompt_string) + + @classmethod + def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + + result = None + if sys.__stdin__.isatty(): + + do_prompt = self.prompt + + if prompt and default is not None: + msg = "%s [%s]: " % (prompt, default) + elif prompt: + msg = "%s: " % prompt + else: + msg = 'input for %s: ' % varname + + if confirm: + while True: + result = do_prompt(msg, private) + second = do_prompt("confirm " + msg, private) + if result == second: + break + display.display("***** VALUES ENTERED DO NOT MATCH ****") + else: + result = do_prompt(msg, private) + else: + result = None + display.warning("Not prompting as we are not in interactive mode") + + # if result is false and default is not None + if not result and default is not None: + result = default + + if encrypt: + result = do_encrypt(result, encrypt, salt_size, salt) + + # handle utf-8 chars + result = to_unicode(result, errors='strict') + return result @staticmethod def _output_encoding(stderr=False): From 5f0a348447809b71254d21a652dd39f7d0e995f0 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Fri, 8 Jan 2016 11:37:28 -0600 Subject: [PATCH 0312/1113] Restore ability for a module to specify WANT_JSON --- lib/ansible/plugins/action/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 3f4fff588e9..6c65716bbf8 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -397,7 +397,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): if tmp: remote_module_filename = self._connection._shell.get_remote_filename(module_name) remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename) - if module_style == 'old': + if module_style in ['old', 'non_native_want_json']: # we'll also need a temp file to hold our module arguments args_file_path = self._connection._shell.join_path(tmp, 'args') @@ -411,6 +411,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): for k,v in iteritems(module_args): args_data += '%s="%s" ' % (k, pipes.quote(text_type(v))) self._transfer_data(args_file_path, args_data) + elif module_style == 'non_native_want_json': + self._transfer_data(args_file_path, json.dumps(module_args)) display.debug("done transferring module to remote") environment_string = self._compute_environment_string() From 87ccc5c869433a229c56e37d243aab5215d0874b Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 7 Jan 2016 23:27:37 -0500 Subject: [PATCH 0313/1113] initial add of eos shared module This adds a shared module for communicating with Arista EOS devices over SSH (cli) or JSON-RPC (eapi). This modules replaces the eapi.py module previously added to module_utils. 
This commit includes a documentation fragment that describes the eos common arguments --- lib/ansible/module_utils/eapi.py | 174 -------------- lib/ansible/module_utils/eos.py | 215 ++++++++++++++++++ .../utils/module_docs_fragments/eos.py | 84 +++++++ 3 files changed, 299 insertions(+), 174 deletions(-) delete mode 100644 lib/ansible/module_utils/eapi.py create mode 100644 lib/ansible/module_utils/eos.py create mode 100644 lib/ansible/utils/module_docs_fragments/eos.py diff --git a/lib/ansible/module_utils/eapi.py b/lib/ansible/module_utils/eapi.py deleted file mode 100644 index 6e6129798cd..00000000000 --- a/lib/ansible/module_utils/eapi.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# (c) 2015 Peter Sprygada, <psprygada@ansible.com> -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# -""" -This module adds shared support for Arista EOS devices using eAPI over -HTTP/S transport. It is built on module_utils/urls.py which is required -for proper operation. - -In order to use this module, include it as part of a custom -module as shown below. - -** Note: The order of the import statements does matter. ** - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.eapi import * - -The eapi module provides the following common argument spec: - - * host (str) - The IPv4 address or FQDN of the network device - * port (str) - Overrides the default port to use for the HTTP/S - connection. The default values are 80 for HTTP and - 443 for HTTPS - * username (str) - The username to use to authenticate the HTTP/S - connection. - * password (str) - The password to use to authenticate the HTTP/S - connection. - * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS) - connection or not. The default value is False. - * enable_mode (bool) - Specifies whether or not to enter `enable` mode - prior to executing the command list. The default value is True - * enable_password (str) - The password for entering `enable` mode - on the switch if configured. - * device (dict) - Used to send the entire set of connectin parameters - as a dict object. This argument is mutually exclusive with the - host argument - -In order to communicate with Arista EOS devices, the eAPI feature -must be enabled and configured on the device. 
- -""" -EAPI_COMMON_ARGS = dict( - host=dict(), - port=dict(), - username=dict(), - password=dict(no_log=True), - use_ssl=dict(default=True, type='bool'), - enable_mode=dict(default=True, type='bool'), - enable_password=dict(no_log=True), - device=dict() -) - -def eapi_module(**kwargs): - """Append the common args to the argument_spec - """ - spec = kwargs.get('argument_spec') or dict() - - argument_spec = url_argument_spec() - argument_spec.update(EAPI_COMMON_ARGS) - if kwargs.get('argument_spec'): - argument_spec.update(kwargs['argument_spec']) - kwargs['argument_spec'] = argument_spec - - module = AnsibleModule(**kwargs) - - device = module.params.get('device') or dict() - for key, value in device.iteritems(): - if key in EAPI_COMMON_ARGS: - module.params[key] = value - - params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - if key != 'device': - module.params[key] = value - - return module - -def eapi_url(params): - """Construct a valid Arista eAPI URL - """ - if params['use_ssl']: - proto = 'https' - else: - proto = 'http' - host = params['host'] - url = '{}://{}'.format(proto, host) - if params['port']: - url = '{}:{}'.format(url, params['port']) - return '{}/command-api'.format(url) - -def to_list(arg): - """Convert the argument to a list object - """ - if isinstance(arg, (list, tuple)): - return list(arg) - elif arg is not None: - return [arg] - else: - return [] - -def eapi_body(commands, encoding, reqid=None): - """Create a valid eAPI JSON-RPC request message - """ - params = dict(version=1, cmds=to_list(commands), format=encoding) - return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params) - -def eapi_enable_mode(params): - """Build commands for entering `enable` mode on the switch - """ - if params['enable_mode']: - passwd = params['enable_password'] - if passwd: - return dict(cmd='enable', input=passwd) - else: - return 'enable' - -def eapi_command(module, commands, encoding='json'): - """Send an ordered list of commands to the device over eAPI - """ - commands = to_list(commands) - url = eapi_url(module.params) - - enable = eapi_enable_mode(module.params) - if enable: - commands.insert(0, enable) - - data = eapi_body(commands, encoding) - data = module.jsonify(data) - - headers = {'Content-Type': 'application/json-rpc'} - - module.params['url_username'] = module.params['username'] - module.params['url_password'] = module.params['password'] - - response, headers = fetch_url(module, url, data=data, headers=headers, - method='POST') - - if headers['status'] != 200: - module.fail_json(**headers) - - response = module.from_json(response.read()) - if 'error' in response: - err = response['error'] - module.fail_json(msg='json-rpc error', **err) - - if enable: - response['result'].pop(0) - - return response['result'], headers - -def eapi_configure(module, commands): - """Send configuration commands to the device over eAPI - """ - commands.insert(0, 'configure') - response, headers = eapi_command(module, commands) - response.pop(0) - return response, headers - - diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py new file mode 100644 index 00000000000..e3782a9d097 --- /dev/null +++ b/lib/ansible/module_utils/eos.py @@ -0,0 +1,215 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) + +NET_COMMON_ARGS = dict( + host=dict(required=True), + port=dict(type='int'), + username=dict(required=True), + password=dict(no_log=True), + authorize=dict(default=False, type='bool'), + auth_pass=dict(no_log=True), + transport=dict(choices=['cli', 'eapi']), + use_ssl=dict(default=True, type='bool') +) + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + +class Eapi(object): + + def __init__(self, module): + self.module = module + + # sets the module_utils/urls.py req parameters + self.module.params['url_username'] = module.params['username'] + self.module.params['url_password'] = module.params['password'] + + self.url = None + self.enable = None + + def _get_body(self, commands, encoding, reqid=None): + """Create a valid eAPI JSON-RPC request message + """ + params = dict(version=1, cmds=commands, format=encoding) + return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params) + + def connect(self): + host = self.module.params['host'] + port = self.module.params['port'] + + if self.module.params['use_ssl']: + proto = 'https' + if not port: + port = 443 + else: + proto = 'http' + if not port: + port = 80 + + self.url = '%s://%s:%s/command-api' % (proto, host, port) + + def authorize(self): + if self.module.params['auth_pass']: + passwd = self.module.params['auth_pass'] + self.enable = dict(cmd='enable', input=passwd) + else: + self.enable = 'enable' + + def send(self, commands, encoding='json'): + """Send commands to the device. 
+ """ + clist = to_list(commands) + + if self.enable is not None: + clist.insert(0, self.enable) + + data = self._get_body(clist, encoding) + data = self.module.jsonify(data) + + headers = {'Content-Type': 'application/json-rpc'} + + response, headers = fetch_url(self.module, self.url, data=data, + headers=headers, method='POST') + + if headers['status'] != 200: + self.module.fail_json(**headers) + + response = self.module.from_json(response.read()) + if 'error' in response: + err = response['error'] + self.module.fail_json(msg='json-rpc error', **err) + + if self.enable: + response['result'].pop(0) + + return response['result'] + +class Cli(object): + + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def authorize(self): + passwd = self.module.params['auth_pass'] + self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd)) + + def send(self, commands, encoding='text'): + return self.shell.send(commands) + +class EosModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(EosModule, self).__init__(*args, **kwargs) + self.connection = None + self._config = None + + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config + + def connect(self): + if self.params['transport'] == 'eapi': + self.connection = Eapi(self) + else: + self.connection = Cli(self) + + try: + self.connection.connect() + self.execute('terminal length 0') + + if self.params['authorize']: + self.connection.authorize() + + except Exception, exc: + self.fail_json(msg=exc.message) + + def configure(self, commands): + commands = to_list(commands) + commands.insert(0, 'configure terminal') + responses = self.execute(commands) + responses.pop(0) + return responses + + def execute(self, commands, **kwargs): + try: + return self.connection.send(commands, **kwargs) + except Exception, exc: + self.fail_json(msg=exc.message, commands=commands) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=3) + + def get_config(self): + cmd = 'show running-config' + if self.params['include_defaults']: + cmd += ' all' + if self.params['transport'] == 'cli': + return self.execute(cmd)[0] + else: + resp = self.execute(cmd, encoding='text') + return resp[0]['output'] + + +def get_module(**kwargs): + """Return instance of EosModule + """ + + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False + + module = EosModule(**kwargs) + + # HAS_PARAMIKO is set by module_utils/shell.py + if module.params['transport'] == 'cli' and not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') + + # copy in values from local action. 
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + + module.connect() + + return module + diff --git a/lib/ansible/utils/module_docs_fragments/eos.py b/lib/ansible/utils/module_docs_fragments/eos.py new file mode 100644 index 00000000000..7cca8b2a781 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/eos.py @@ -0,0 +1,84 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. This value applies to either I(cli) or I(eapi). The port + value will default to the approriate transport common port if + none is provided in the task. (cli=22, http=80, https=443). + required: false + default: 0 (use common port) + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + either the CLI login or the eAPI authentication depending on which + transport is used. + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. This is a common argument used for either I(cli) + or I(eapi) transports. + required: false + default: null + authorize: + description: + - Instructs the module to enter priviledged mode on the remote device + before sending any commands. If not specified, the device will + attempt to excecute all commands in non-priviledged mode. + required: false + default: false + choices: BOOLEANS + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. If I(authorize) is false, then this argument + does nothing + required: false + default: none + transport: + description: + - Configures the transport connection to use when connecting to the + remote device. The transport argument supports connectivity to the + device over cli (ssh) or eapi. + required: true + default: cli + use_ssl: + description: + - Configures the I(transport) to use SSL if set to true only when the + I(transport) argument is configured as eapi. 
If the transport + argument is not eapi, this value is ignored + required: false + default: true + choices: BOOLEANS + +""" From d1dacfb3cabad190429b287d1d8256e46d10bcd3 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 7 Jan 2016 23:27:56 -0500 Subject: [PATCH 0314/1113] updates the ios shared module with new shell This update refactor the ios shared module to use the new shell shared library instead of issh and cli. It also adds the ios documentation fragment to be used when building ios based modules. --- lib/ansible/module_utils/ios.py | 225 +++++++----------- .../utils/module_docs_fragments/ios.py | 67 ++++++ 2 files changed, 156 insertions(+), 136 deletions(-) create mode 100644 lib/ansible/utils/module_docs_fragments/ios.py diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index 085b68dcd28..550a2de6d53 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -16,165 +16,118 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # -""" -Adds shared module support for connecting to and configuring Cisco -IOS devices. This shared module builds on module_utils/ssh.py and -implements the Shell object. -** Note: The order of the import statements does matter. ** +NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) -from ansible.module_utils.basic import * -from ansible.module_utils.ssh import * -from ansible.module_utils.ios import * - -This module provides the following common argument spec for creating -ios connections: - - * enable_mode (bool) - Forces the shell connection into IOS enable mode - - * enable_password (str) - Configures the IOS enable mode password to be - send to the device to authorize the session - - * device (dict) - Accepts the set of configuration parameters as a - dict object - -Note: These shared arguments are in addition to the arguments provided by -the module_utils/ssh.py shared module - -""" -import socket - -IOS_PROMPTS_RE = [ - re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#](?:\s*)$'), - re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#$'), - re.compile(r'\x1b.*$') -] - -IOS_ERRORS_RE = [ - re.compile(r"% ?Error"), - re.compile(r"^% \w+", re.M), - re.compile(r"% ?Bad secret"), - re.compile(r"invalid input", re.I), - re.compile(r"(?:incomplete|ambiguous) command", re.I), - re.compile(r"connection timed out", re.I), - re.compile(r"[^\r\n]+ not found", re.I), - re.compile(r"'[^']' +returned error code: ?\d+"), -] - -IOS_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) - -IOS_COMMON_ARGS = dict( - host=dict(), - port=dict(type='int', default=22), - username=dict(), - password=dict(), - enable_mode=dict(default=False, type='bool'), - enable_password=dict(), - connect_timeout=dict(type='int', default=10), - device=dict() +NET_COMMON_ARGS = dict( + host=dict(required=True), + port=dict(default=22, type='int'), + username=dict(required=True), + password=dict(no_log=True), + authorize=dict(default=False, type='bool'), + auth_pass=dict(no_log=True), ) - -def ios_module(**kwargs): - """Append the common args to the argument_spec - """ - spec = kwargs.get('argument_spec') or dict() - - argument_spec = shell_argument_spec() - argument_spec.update(IOS_COMMON_ARGS) - if kwargs.get('argument_spec'): - argument_spec.update(kwargs['argument_spec']) - kwargs['argument_spec'] = argument_spec - - module = AnsibleModule(**kwargs) - - device = module.params.get('device') or dict() - for key, value in 
device.iteritems(): - if key in IOS_COMMON_ARGS: - module.params[key] = value - - params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - if key != 'device': - module.params[key] = value - - return module - -def to_list(arg): - """Try to force the arg to a list object - """ - if isinstance(arg, (list, tuple)): - return list(arg) - elif arg is not None: - return [arg] +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] else: - return [] + return list() -class IosShell(object): +class Cli(object): - def __init__(self): + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def authorize(self): + passwd = self.module.params['auth_pass'] + self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd)) + + def send(self, commands): + return self.shell.send(commands) + +class IosModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(IosModule, self).__init__(*args, **kwargs) self.connection = None + self._config = None - def connect(self, host, username, password, **kwargs): - port = kwargs.get('port') or 22 - timeout = kwargs.get('timeout') or 10 + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config - self.connection = Shell() + def connect(self): + try: + self.connection = Cli(self) + self.connection.connect() + self.execute('terminal length 0') - self.connection.prompts.extend(IOS_PROMPTS_RE) - self.connection.errors.extend(IOS_ERRORS_RE) + if self.params['authorize']: + self.connection.authorize() - self.connection.open(host, port=port, username=username, - password=password, timeout=timeout) - - def authorize(self, passwd=None): - command = Command('enable', prompt=IOS_PASSWD_RE, response=passwd) - self.send(command) + except Exception, exc: + self.fail_json(msg=exc.message) def configure(self, commands): commands = to_list(commands) - commands.insert(0, 'configure terminal') - commands.append('end') - - resp = self.send(commands) - resp.pop(0) - resp.pop() - - return resp - - def send(self, commands): - responses = list() - for cmd in to_list(commands): - response = self.connection.send(cmd) - responses.append(response) + responses = self.execute(commands) + responses.pop(0) return responses -def ios_connection(module): - """Creates a connection to an IOS device based on the module arguments + def execute(self, commands, **kwargs): + return self.connection.send(commands) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=1) + + def get_config(self): + cmd = 'show running-config' + if self.params['include_defaults']: + cmd += ' all' + return self.execute(cmd)[0] + +def get_module(**kwargs): + """Return instance of IosModule """ - host = module.params['host'] - port = module.params['port'] - username = module.params['username'] - password = module.params['password'] + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False - timeout = module.params['connect_timeout'] + module 
= IosModule(**kwargs) - try: - shell = IosShell() - shell.connect(host, port=port, username=username, password=password, - timeout=timeout) - shell.send('terminal length 0') - except paramiko.ssh_exception.AuthenticationException, exc: - module.fail_json(msg=exc.message) - except socket.error, exc: - module.fail_json(msg=exc.strerror, errno=exc.errno) + # HAS_PARAMIKO is set by module_utils/shell.py + if not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') - if module.params['enable_mode']: - shell.authorize(module.params['enable_password']) - - return shell + # copy in values from local action. + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + module.connect() + return module diff --git a/lib/ansible/utils/module_docs_fragments/ios.py b/lib/ansible/utils/module_docs_fragments/ios.py new file mode 100644 index 00000000000..5f07bbfde76 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/ios.py @@ -0,0 +1,67 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. The port value will default to the well known SSH port + of 22 + required: false + default: 22 + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + the SSH session + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. The value of I(password) is used to authenticate + the SSH session + required: false + default: null + authorize: + description: + - Instructs the module to enter priviledged mode on the remote device + before sending any commands. If not specified, the device will + attempt to excecute all commands in non-priviledged mode. + required: false + default: false + choices: BOOLEANS + auth_pass: + description: + - Specifies the password to use if required to enter privileged mode + on the remote device. 
If I(authorize) is false, then this argument + does nothing + required: false + default: none + +""" From 456b3d2c23e6218c6c3a1b718f940e4830f8e5c9 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 8 Jan 2016 14:00:23 -0500 Subject: [PATCH 0315/1113] adds new iosxr shared module for developing modules that work with IOS XR devices This commit adds a new shared module for working with Cisco IOS XR devices over CLI (SSH). It also provides a documentation fragement for the commmon arguments provided by the iosxr module. --- lib/ansible/module_utils/iosxr.py | 121 ++++++++++++++++++ .../utils/module_docs_fragments/iosxr.py | 52 ++++++++ 2 files changed, 173 insertions(+) create mode 100644 lib/ansible/module_utils/iosxr.py create mode 100644 lib/ansible/utils/module_docs_fragments/iosxr.py diff --git a/lib/ansible/module_utils/iosxr.py b/lib/ansible/module_utils/iosxr.py new file mode 100644 index 00000000000..9686adc7f5f --- /dev/null +++ b/lib/ansible/module_utils/iosxr.py @@ -0,0 +1,121 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) + +NET_COMMON_ARGS = dict( + host=dict(required=True), + port=dict(default=22, type='int'), + username=dict(required=True), + password=dict(no_log=True) +) + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + +class Cli(object): + + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def send(self, commands): + return self.shell.send(commands) + +class IosxrModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(IosxrModule, self).__init__(*args, **kwargs) + self.connection = None + self._config = None + + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config + + def connect(self): + try: + self.connection = Cli(self) + self.connection.connect() + self.execute('terminal length 0') + except Exception, exc: + self.fail_json(msg=exc.message) + + def configure(self, commands): + commands = to_list(commands) + commands.insert(0, 'configure terminal') + commands.append('commit') + responses = self.execute(commands) + responses.pop(0) + responses.pop() + return responses + + def execute(self, commands, **kwargs): + return self.connection.send(commands) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=1) + + def get_config(self): + return self.execute('show running-config')[0] + 
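A hedged sketch of how a configuration module would drive this iosxr shared code: get_module() (defined next) opens the SSH shell and disables paging, while configure() above brackets the caller's lines with configure terminal and commit and trims those two echoed responses. The lines option and the wildcard imports exist only for this example:

# Hypothetical IOS XR configuration module built on the shared code in this file.
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.iosxr import *

def main():
    module = get_module(argument_spec=dict(lines=dict(type='list', required=True)))
    # e.g. lines=['hostname ansible-xr-01'] is sent as
    # ['configure terminal', 'hostname ansible-xr-01', 'commit']
    responses = module.configure(module.params['lines'])
    module.exit_json(changed=True, responses=responses)

if __name__ == '__main__':
    main()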
+def get_module(**kwargs): + """Return instance of IosxrModule + """ + + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False + + module = IosxrModule(**kwargs) + + if not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') + + # copy in values from local action. + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + + module.connect() + + return module + diff --git a/lib/ansible/utils/module_docs_fragments/iosxr.py b/lib/ansible/utils/module_docs_fragments/iosxr.py new file mode 100644 index 00000000000..cb9ba28fc2a --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/iosxr.py @@ -0,0 +1,52 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. The port value will default to the well known SSH port + of 22 + required: false + default: 22 + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + the SSH session + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. 
The value of I(password) is used to authenticate + the SSH session + required: false + default: null + + """ From e7804a6fdd0df529e9f58d1557876b9ca11077be Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 8 Jan 2016 16:25:10 -0500 Subject: [PATCH 0316/1113] fixed test to use hostvars and not expect vars sub vars is at the same level as hostvars and was only included in hostvars temporarily as a bug --- test/integration/roles/test_filters/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index cb1549d3f78..aab16029208 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -77,4 +77,4 @@ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first" - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" - - "'amazon' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" + - "'amazon' == ['localhost']|map('extract',hostvars,['group_names',0])|list|first" From 749fbd43efbc8d28ccde206f701e91ba3a2e14ad Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 7 Jan 2016 12:08:21 -0500 Subject: [PATCH 0317/1113] Removing test for map filter until we can figure out how that's supposed to work --- test/integration/roles/test_filters/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index aab16029208..6d75c0d81cf 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -77,4 +77,3 @@ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first" - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" - - "'amazon' == ['localhost']|map('extract',hostvars,['group_names',0])|list|first" From c9a5f7ea7ef853cfe75ba15437dea13811bdfd72 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 8 Jan 2016 17:57:09 -0500 Subject: [PATCH 0318/1113] updated self to cls to match convention tempted to just use this. 
in all cases --- lib/ansible/utils/display.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index ef5a4bc6872..0447585fa31 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -280,12 +280,12 @@ class Display: return input(prompt_string) @classmethod - def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + def do_var_prompt(cls, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): result = None if sys.__stdin__.isatty(): - do_prompt = self.prompt + do_prompt = cls.prompt if prompt and default is not None: msg = "%s [%s]: " % (prompt, default) From 01a99f52a9018252b21f72ce3d41dcb9e5e9031a Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 8 Jan 2016 12:51:31 -0500 Subject: [PATCH 0319/1113] adds shared module nxos for building cisco nxos modules This commit refactors the nxapi into a new shared module nxos that supports connectivity over both ssh (cli) and nxapi. It supercedes the nxapi shared module and removes it from module_utils. This commit also adds a documentation fragement supporting the nxos shared module --- lib/ansible/module_utils/nxos.py | 216 ++++++++++++++++++ .../utils/module_docs_fragments/nxos.py | 69 ++++++ 2 files changed, 285 insertions(+) create mode 100644 lib/ansible/module_utils/nxos.py create mode 100644 lib/ansible/utils/module_docs_fragments/nxos.py diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py new file mode 100644 index 00000000000..5bde5cccd1d --- /dev/null +++ b/lib/ansible/module_utils/nxos.py @@ -0,0 +1,216 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
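Before the implementation, a note on the wire format: with the nxapi transport the module wraps commands in an ins_api envelope and POSTs it to http(s)://host:port/ins. A hedged illustration of the body built for a single show command, using the defaults in the code below (the command itself is only an example):

# Illustrative ins_api message as assembled by Nxapi._get_body() with the
# default version, chunk, command type and encoding.
body = {
    'ins_api': {
        'version': '1.2',
        'type': 'cli_show_ascii',
        'chunk': '0',
        'sid': None,
        'input': 'show version',
        'output_format': 'json',
    }
}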
+# +NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) + +NET_COMMON_ARGS = dict( + host=dict(required=True), + port=dict(type='int'), + username=dict(required=True), + password=dict(no_log=True), + transport=dict(choices=['cli', 'nxapi']), + use_ssl=dict(default=False, type='bool') +) + +NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] +NXAPI_ENCODINGS = ['json', 'xml'] + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + +class Nxapi(object): + + def __init__(self, module): + self.module = module + + # sets the module_utils/urls.py req parameters + self.module.params['url_username'] = module.params['username'] + self.module.params['url_password'] = module.params['password'] + + self.url = None + self.enable = None + + def _get_body(self, commands, command_type, encoding, version='1.2', chunk='0', sid=None): + """Encodes a NXAPI JSON request message + """ + if isinstance(commands, (list, set, tuple)): + commands = ' ;'.join(commands) + + if encoding not in NXAPI_ENCODINGS: + self.module.fail_json("Invalid encoding. Received %s. Expected one of %s" % + (encoding, ','.join(NXAPI_ENCODINGS))) + + msg = { + 'version': version, + 'type': command_type, + 'chunk': chunk, + 'sid': sid, + 'input': commands, + 'output_format': encoding + } + return dict(ins_api=msg) + + def connect(self): + host = self.module.params['host'] + port = self.module.params['port'] + + if self.module.params['use_ssl']: + proto = 'https' + if not port: + port = 443 + else: + proto = 'http' + if not port: + port = 80 + + self.url = '%s://%s:%s/ins' % (proto, host, port) + + def send(self, commands, command_type='cli_show_ascii', encoding='json'): + """Send commands to the device. + """ + clist = to_list(commands) + + if command_type not in NXAPI_COMMAND_TYPES: + self.module.fail_json(msg="Invalid command_type. Received %s. Expected one of %s." 
% + (command_type, ','.join(NXAPI_COMMAND_TYPES))) + + data = self._get_body(clist, command_type, encoding) + data = self.module.jsonify(data) + + headers = {'Content-Type': 'application/json'} + + response, headers = fetch_url(self.module, self.url, data=data, headers=headers, + method='POST') + + if headers['status'] != 200: + self.module.fail_json(**headers) + + response = self.module.from_json(response.read()) + if 'error' in response: + err = response['error'] + self.module.fail_json(msg='json-rpc error % ' % str(err)) + + return response + +class Cli(object): + + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def send(self, commands, encoding='text'): + return self.shell.send(commands) + +class NxosModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(NxosModule, self).__init__(*args, **kwargs) + self.connection = None + self._config = None + + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config + + def connect(self): + if self.params['transport'] == 'nxapi': + self.connection = Nxapi(self) + else: + self.connection = Cli(self) + + try: + self.connection.connect() + self.execute('terminal length 0') + except Exception, exc: + self.fail_json(msg=exc.message) + + def configure(self, commands): + commands = to_list(commands) + if self.params['transport'] == 'cli': + commands.insert(0, 'configure terminal') + responses = self.execute(commands) + responses.pop(0) + else: + responses = self.execute(commands, command_type='cli_conf') + return responses + + def execute(self, commands, **kwargs): + try: + return self.connection.send(commands, **kwargs) + except Exception, exc: + self.fail_json(msg=exc.message) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=2) + + def get_config(self): + cmd = 'show running-config' + if self.params['include_defaults']: + cmd += ' all' + if self.params['transport'] == 'cli': + return self.execute(cmd)[0] + else: + resp = self.execute(cmd) + if not resp.get('ins_api').get('outputs').get('output').get('body'): + self.fail_json(msg="Unrecognized response: %s" % str(resp)) + return resp['ins_api']['outputs']['output']['body'] + +def get_module(**kwargs): + """Return instance of EosModule + """ + + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False + + module = NxosModule(**kwargs) + + # HAS_PARAMIKO is set by module_utils/shell.py + if module.params['transport'] == 'cli' and not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') + + # copy in values from local action. 
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + + module.connect() + + return module diff --git a/lib/ansible/utils/module_docs_fragments/nxos.py b/lib/ansible/utils/module_docs_fragments/nxos.py new file mode 100644 index 00000000000..37d287ea722 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/nxos.py @@ -0,0 +1,69 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. This value applies to either I(cli) or I(nxapi). The port + value will default to the approriate transport common port if + none is provided in the task. (cli=22, http=80, https=443). + required: false + default: 0 (use common port) + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + either the CLI login or the nxapi authentication depending on which + transport is used. + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. This is a common argument used for either I(cli) + or I(nxapi) transports. + required: false + default: null + transport: + description: + - Configures the transport connection to use when connecting to the + remote device. The transport argument supports connectivity to the + device over cli (ssh) or nxapi. + required: true + default: cli + use_ssl: + description: + - Configures the I(transport) to use SSL if set to true only when the + I(transport) argument is configured as nxapi. If the transport + argument is not nxapi, this value is ignored + required: false + default: false + choices: BOOLEANS + +""" From 3ae6fd4b3100429a0c3ee028e2932f091fd3270b Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sat, 9 Jan 2016 10:18:17 -0500 Subject: [PATCH 0320/1113] initial add of openswitch shared module This commit adds a new shared module openswitch for building modules that work with OpenSwitch. This shared module supports connectivity to OpenSwitch devices over SSH, CLI or REST. 
It also adds an openswitch documentation fragment for use in modules --- lib/ansible/module_utils/openswitch.py | 246 ++++++++++++++++++ .../utils/module_docs_fragments/openswitch.py | 66 +++++ 2 files changed, 312 insertions(+) create mode 100644 lib/ansible/module_utils/openswitch.py create mode 100644 lib/ansible/utils/module_docs_fragments/openswitch.py diff --git a/lib/ansible/module_utils/openswitch.py b/lib/ansible/module_utils/openswitch.py new file mode 100644 index 00000000000..9ff7450ee74 --- /dev/null +++ b/lib/ansible/module_utils/openswitch.py @@ -0,0 +1,246 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +import time +import json + +try: + from runconfig import runconfig + from opsrest.settings import settings + from opsrest.manager import OvsdbConnectionManager + from opslib import restparser + HAS_OPS = True +except ImportError: + HAS_OPS = False + +NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) + +NET_COMMON_ARGS = dict( + host=dict(), + port=dict(type='int'), + username=dict(), + password=dict(no_log=True), + transport=dict(default='ssh', choices=['ssh', 'cli', 'rest']), +) + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + +def get_idl(): + manager = OvsdbConnectionManager(settings.get('ovs_remote'), + settings.get('ovs_schema')) + manager.start() + idl = manager.idl + + init_seq_no = 0 + while (init_seq_no == idl.change_seqno): + idl.run() + time.sleep(1) + + return idl + +def get_schema(): + return restparser.parseSchema(settings.get('ext_schema')) + +def get_runconfig(): + idl = get_idl() + schema = get_schema() + return runconfig.RunConfigUtil(idl, schema) + +class Response(object): + + def __init__(self, resp, hdrs): + self.body = resp.read() + self.headers = hdrs + + @property + def json(self): + try: + return json.loads(self.body) + except ValueError: + return None + +class Rest(object): + + def __init__(self, module): + self.module = module + self.baseurl = None + + def connect(self): + host = self.module.params['host'] + port = self.module.params['port'] + + if self.module.params['use_ssl']: + proto = 'https' + if not port: + port = 443 + else: + proto = 'http' + if not port: + port = 80 + + self.baseurl = '%s://%s:%s/rest/v1' % (proto, host, port) + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + if headers is None: + headers = dict() + headers.update({'Content-Type': 'application/json'}) + + resp, hdrs = fetch_url(self.module, url, data=data, headers=headers, + method=method) + + return Response(resp, hdrs) + + def get(self, path, data=None, headers=None): + return self.send('GET', path, data, headers) + 
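To show how the Rest wrapper in this file is meant to be driven, a hedged sketch that reads and re-writes the full configuration over the REST transport; the module variable is assumed to be an already-constructed OpsModule, and error handling is elided:

# Illustrative use of the Rest transport added in this commit.
rest = Rest(module)                               # module: an OpsModule instance
rest.connect()                                    # builds http(s)://host:port/rest/v1
resp = rest.get('/system/full-configuration')     # returns a Response wrapper
config = resp.json                                # parsed JSON body, or None
rest.put('/system/full-configuration', data=config)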
+ def put(self, path, data=None, headers=None): + return self.send('PUT', path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send('POST', path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send('DELETE', path, data, headers) + +class Cli(object): + + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def send(self, commands, encoding='text'): + return self.shell.send(commands) + +class OpsModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(OpsModule, self).__init__(*args, **kwargs) + self.connection = None + self._config = None + self._runconfig = None + + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config + + def connect(self): + if self.params['transport'] == 'rest': + self.connection = Rest(self) + elif self.params['transport'] == 'cli': + self.connection = Cli(self) + + try: + self.connection.connect() + except Exception, exc: + self.fail_json(msg=exc.message) + + def configure(self, config): + if self.params['transport'] == 'cli': + commands = to_list(config) + commands.insert(0, 'configure terminal') + responses = self.execute(commands) + responses.pop(0) + return responses + elif self.params['transport'] == 'rest': + path = '/system/full-configuration' + return self.connection.put(path, data=config) + else: + if not self._runconfig: + self._runconfig = get_runconfig() + self._runconfig.write_config_to_db(config) + + def execute(self, commands, **kwargs): + try: + return self.connection.send(commands, **kwargs) + except Exception, exc: + self.fail_json(msg=exc.message, commands=commands) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=4) + + def get_config(self): + if self.params['transport'] == 'cli': + return self.execute('show running-config')[0] + + elif self.params['transport'] == 'rest': + resp = self.connection.get('/system/full-configuration') + return resp.json + + else: + if not self._runconfig: + self._runconfig = get_runconfig() + return self._runconfig.get_running_config() + + +def get_module(**kwargs): + """Return instance of OpsModule + """ + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False + + module = OpsModule(**kwargs) + + if not HAS_OPS and module.params['transport'] == 'ssh': + module.fail_json(msg='could not import ops library') + + # HAS_PARAMIKO is set by module_utils/shell.py + if module.params['transport'] == 'cli' and not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') + + # copy in values from local action. 
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + + if module.params['transport'] in ['cli', 'rest']: + module.connect() + + return module + diff --git a/lib/ansible/utils/module_docs_fragments/openswitch.py b/lib/ansible/utils/module_docs_fragments/openswitch.py new file mode 100644 index 00000000000..1427fc75253 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/openswitch.py @@ -0,0 +1,66 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. Note this argument + does not affect the SSH argument. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. This value applies to either I(cli) or I(). The port + value will default to the approriate transport common port if + none is provided in the task. (cli=22, http=80, https=443). Note + this argument does not affect the SSH transport. + required: false + default: 0 (use common port) + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + either the CLI login or the eAPI authentication depending on which + transport is used. Note this argument does not affect the SSH + transport. + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. This is a common argument used for either I(cli) + or I(rest) transports. Note this argument does not affect the SSH + transport + required: false + default: null + transport: + description: + - Configures the transport connection to use when connecting to the + remote device. The transport argument supports connectivity to the + device over ssh, cli or REST. + required: true + default: ssh + choices: ['ssh', 'cli', 'rest'] + +""" From 04ace28f6db2aea076871938b9ac218790324256 Mon Sep 17 00:00:00 2001 From: Clare Macrae <github@cfmacrae.fastmail.co.uk> Date: Sat, 9 Jan 2016 22:34:03 +0000 Subject: [PATCH 0321/1113] Fixed typo: Integerations -> Integrations --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 642a828b6e1..ba841eed09d 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -328,7 +328,7 @@ This only removes the role from Galaxy. It does not impact the actual GitHub rep `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. 
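For the Galaxy QA server mentioned here, a hedged ansible.cfg sketch of the persistent form of that setting (the [galaxy] section and server key are the ones referred to in the surrounding documentation):

[galaxy]
server = https://galaxy-qa.ansible.com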
You can also add a *server* definition in the [galaxy] section of your ansible.cfg file. -Setup Travis Integerations +Setup Travis Integrations -------------------------- Using the setup command you can enable notifications from `travis <http://travis-ci.org>`_. The setup command expects that the user previously authenticated with Galaxy using the login command. From 2eb22d55366076f24505826bc048dfee323888cf Mon Sep 17 00:00:00 2001 From: Thomas Quinot <thomas@quinot.org> Date: Sun, 10 Jan 2016 10:27:50 +0100 Subject: [PATCH 0322/1113] Fix minor typo in comment --- lib/ansible/playbook/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index 4ca8e4e463a..7725b5c3c9b 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -153,7 +153,7 @@ class Base: setattr(Base, name, property(getter, setter, deleter)) # Place the value into the instance so that the property can - # process and hold that value/ + # process and hold that value. setattr(self, name, value.default) def preprocess_data(self, ds): From e709095f53f39ad792d4e7f4c9ec5f4a4afde111 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 10 Jan 2016 10:33:00 -0500 Subject: [PATCH 0323/1113] initial add of junos shared module This commit adds a new shared module for working with network devices running the Juniper Junos operating system. The commit includes a new document fragment junos to be used when building modules. The junos shared module currently only supports CLI --- lib/ansible/module_utils/junos.py | 126 ++++++++++++++++++ .../utils/module_docs_fragments/junos.py | 52 ++++++++ 2 files changed, 178 insertions(+) create mode 100644 lib/ansible/module_utils/junos.py create mode 100644 lib/ansible/utils/module_docs_fragments/junos.py diff --git a/lib/ansible/module_utils/junos.py b/lib/ansible/module_utils/junos.py new file mode 100644 index 00000000000..fa3104101e4 --- /dev/null +++ b/lib/ansible/module_utils/junos.py @@ -0,0 +1,126 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
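A hedged sketch of a read-only module built on this junos shared code: get_module() opens the SSH shell, enters the CLI and disables screen paging, the config property caches the output of show configuration, and parse_config() hands it to the netcfg parser with indent=4. The wildcard imports are assumptions for the example:

# Hypothetical read-only module using the junos shared module added below.
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.junos import *

def main():
    module = get_module()
    config = module.config                  # cached 'show configuration' output
    objects = module.parse_config(config)   # netcfg parse() with indent=4
    module.exit_json(changed=False, config_lines=len(objects))

if __name__ == '__main__':
    main()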
+# + +NET_COMMON_ARGS = dict( + host=dict(required=True), + port=dict(default=22, type='int'), + username=dict(required=True), + password=dict(no_log=True) +) + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + +class Cli(object): + + def __init__(self, module): + self.module = module + self.shell = None + + def connect(self, **kwargs): + host = self.module.params['host'] + port = self.module.params['port'] or 22 + + username = self.module.params['username'] + password = self.module.params['password'] + + self.shell = Shell() + self.shell.open(host, port=port, username=username, password=password) + + def send(self, commands): + return self.shell.send(commands) + + +class JunosModule(AnsibleModule): + + def __init__(self, *args, **kwargs): + super(JunosModule, self).__init__(*args, **kwargs) + self.connection = None + self._config = None + + @property + def config(self): + if not self._config: + self._config = self.get_config() + return self._config + + def connect(self): + try: + self.connection = Cli(self) + self.connection.connect() + self.execute('cli') + self.execute('set cli screen-length 0') + except ShellErrror, exc: + self.fail_json(msg=str(exc)) + + def configure(self, commands): + commands = to_list(commands) + commands.insert(0, 'configure') + commands.append('commit and-quit') + responses = self.execute(commands) + responses.pop(0) + responses.pop() + return responses + + def execute(self, commands, **kwargs): + try: + return self.connection.send(commands) + except ShellError, exc: + self.fail_json(msg=exc.message) + + def disconnect(self): + self.connection.close() + + def parse_config(self, cfg): + return parse(cfg, indent=4) + + def get_config(self): + cmd = 'show configuration' + return self.execute(cmd)[0] + +def get_module(**kwargs): + """Return instance of JunosModule + """ + + argument_spec = NET_COMMON_ARGS.copy() + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + kwargs['check_invalid_arguments'] = False + + module = JunosModule(**kwargs) + + # HAS_PARAMIKO is set by module_utils/shell.py + if not HAS_PARAMIKO: + module.fail_json(msg='paramiko is required but does not appear to be installed') + + # copy in values from local action. + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + module.params[key] = value + + module.connect() + + return module + diff --git a/lib/ansible/utils/module_docs_fragments/junos.py b/lib/ansible/utils/module_docs_fragments/junos.py new file mode 100644 index 00000000000..d7edb02da7f --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/junos.py @@ -0,0 +1,52 @@ +# +# (c) 2015, Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
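The fragments added throughout this series are pulled into a module's documentation with the extends_documentation_fragment keyword; a hedged sketch of a DOCUMENTATION string that reuses the junos fragment defined below (the module name and its commands option are hypothetical):

# Hypothetical module documentation reusing the junos fragment.
DOCUMENTATION = '''
---
module: junos_command
short_description: Send arbitrary commands to a Junos device
description:
  - Sends an ordered list of commands to the remote device over the CLI
    session opened by the junos shared module.
extends_documentation_fragment: junos
options:
  commands:
    description:
      - List of commands to send to the remote device.
    required: true
'''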
+ + +class ModuleDocFragment(object): + + # Standard files documentation fragment + DOCUMENTATION = """ +options: + host: + description: + - Specifies the DNS host name or address for connecting to the remote + device over the specified transport. The value of host is used as + the destination address for the transport. + required: true + port: + description: + - Specifies the port to use when buiding the connection to the remote + device. The port value will default to the well known SSH port + of 22 + required: false + default: 22 + username: + description: + - Configures the usename to use to authenticate the connection to + the remote device. The value of I(username) is used to authenticate + the SSH session + required: true + password: + description: + - Specifies the password to use when authentication the connection to + the remote device. The value of I(password) is used to authenticate + the SSH session + required: false + default: null + +""" From 01bf3940e3cebd642d4d7f5b777d24231cb6c474 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 10 Jan 2016 10:30:01 -0500 Subject: [PATCH 0324/1113] adds network config file parser to shared modules This commit adds a new shared module that parses network device configuration files. It is used to build modules that work with the various supported network device operating systems --- lib/ansible/module_utils/netcfg.py | 85 ++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 lib/ansible/module_utils/netcfg.py diff --git a/lib/ansible/module_utils/netcfg.py b/lib/ansible/module_utils/netcfg.py new file mode 100644 index 00000000000..afd8be3a56f --- /dev/null +++ b/lib/ansible/module_utils/netcfg.py @@ -0,0 +1,85 @@ +# +# (c) 2015 Peter Sprygada, <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
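A small, self-contained illustration of what the parser added below produces for a two-level, IOS-style snippet; the sample configuration is made up, and indent=1 matches what the ios shared module passes to parse():

# Illustrative use of netcfg.parse(); assumes the ansible package is importable.
from ansible.module_utils.netcfg import parse

SAMPLE = '\n'.join([
    'interface GigabitEthernet0/1',
    ' description uplink',
    ' ip address 192.0.2.1 255.255.255.0',
])

objects = parse(SAMPLE, indent=1)
assert objects[0].text == 'interface GigabitEthernet0/1'
assert [c.text for c in objects[0].children] == [
    'description uplink',
    'ip address 192.0.2.1 255.255.255.0',
]
assert [p.text for p in objects[2].parents] == ['interface GigabitEthernet0/1']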
+# + +import re +import collections + +class ConfigLine(object): + + def __init__(self, text): + self.text = text + self.children = list() + self.parents = list() + self.raw = None + + def __str__(self): + return self.raw + + def __eq__(self, other): + if self.text == other.text: + return self.parents == other.parents + + def __ne__(self, other): + return not self.__eq__(other) + +def parse(lines, indent): + toplevel = re.compile(r'\S') + childline = re.compile(r'^\s*(.+)$') + repl = r'([{|}|;])' + + ancestors = list() + config = list() + + for line in str(lines).split('\n'): + text = str(re.sub(repl, '', line)).strip() + + cfg = ConfigLine(text) + cfg.raw = line + + if not text or text[0] in ['!', '#']: + continue + + # handle top level commands + if toplevel.match(line): + ancestors = [cfg] + + # handle sub level commands + else: + match = childline.match(line) + line_indent = match.start(1) + level = int(line_indent / indent) + parent_level = level - 1 + + cfg.parents = ancestors[:level] + + if level > len(ancestors): + config.append(cfg) + continue + + for i in range(level, len(ancestors)): + ancestors.pop() + + ancestors.append(cfg) + ancestors[parent_level].children.append(cfg) + + config.append(cfg) + + return config + + From c3dd0213ef2046884003196338cf611e71333af7 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <privateip@users.noreply.github.com> Date: Sun, 10 Jan 2016 14:56:15 -0500 Subject: [PATCH 0325/1113] deletes nxapi from shared modules The nxapi module has been superseded by the nxos shared module and is not longer needed. This commit removes (deletes) nxapi from module_utils. All custom modules that have used nxapi should be using nxos instead. --- lib/ansible/module_utils/nxapi.py | 155 ------------------------------ 1 file changed, 155 deletions(-) delete mode 100644 lib/ansible/module_utils/nxapi.py diff --git a/lib/ansible/module_utils/nxapi.py b/lib/ansible/module_utils/nxapi.py deleted file mode 100644 index 35bcc442fbd..00000000000 --- a/lib/ansible/module_utils/nxapi.py +++ /dev/null @@ -1,155 +0,0 @@ -# -# (c) 2015 Peter Sprygada, <psprygada@ansible.com> -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. -# -""" -This module adds support for Cisco NXAPI to Ansible shared -module_utils. It builds on module_utils/urls.py to provide -NXAPI support over HTTP/S which is required for proper operation. - -In order to use this module, include it as part of a custom -module as shown below. - -** Note: The order of the import statements does matter. ** - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.nxapi import * - -The nxapi module provides the following common argument spec: - - * host (str) - The IPv4 address or FQDN of the network device - - * port (str) - Overrides the default port to use for the HTTP/S - connection. 
The default values are 80 for HTTP and - 443 for HTTPS - - * username (str) - The username to use to authenticate - the HTTP/S connection. Aliases: username - - * password (str) - The password to use to authenticate - the HTTP/S connection. Aliases: password - - * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS) - connection or not. The default value is False. - - * command_type (str) - The type of command to send to the remote - device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf` - and `bash`. The default value is `cli_show_ascii` - - * device (dict) - Used to send the entire set of connection parameters - as a dict object. This argument is mutually exclusive with the - host argument - -In order to communicate with Cisco NXOS devices, the NXAPI feature -must be enabled and configured on the device. - -""" - -NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] - -NXAPI_COMMON_ARGS = dict( - host=dict(), - port=dict(), - username=dict(), - password=dict(), - use_ssl=dict(default=False, type='bool'), - device=dict(), - command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) -) - -def nxapi_module(**kwargs): - """Append the common args to the argument_spec - """ - spec = kwargs.get('argument_spec') or dict() - - argument_spec = url_argument_spec() - argument_spec.update(NXAPI_COMMON_ARGS) - if kwargs.get('argument_spec'): - argument_spec.update(kwargs['argument_spec']) - kwargs['argument_spec'] = argument_spec - - module = AnsibleModule(**kwargs) - - device = module.params.get('device') or dict() - for key, value in device.iteritems(): - if key in NXAPI_COMMON_ARGS: - module.params[key] = value - - params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - if key != 'device': - module.params[key] = value - - return module - -def nxapi_url(params): - """Constructs a valid NXAPI url - """ - if params['use_ssl']: - proto = 'https' - else: - proto = 'http' - host = params['host'] - url = '{}://{}'.format(proto, host) - if params['port']: - url = '{}:{}'.format(url, params['port']) - url = '{}/ins'.format(url) - return url - -def nxapi_body(commands, command_type, **kwargs): - """Encodes a NXAPI JSON request message - """ - if isinstance(commands, (list, set, tuple)): - commands = ' ;'.join(commands) - - msg = { - 'version': kwargs.get('version') or '1.2', - 'type': command_type, - 'chunk': kwargs.get('chunk') or '0', - 'sid': kwargs.get('sid'), - 'input': commands, - 'output_format': 'json' - } - - return dict(ins_api=msg) - -def nxapi_command(module, commands, command_type=None, **kwargs): - """Sends the list of commands to the device over NXAPI - """ - url = nxapi_url(module.params) - - command_type = command_type or module.params['command_type'] - - data = nxapi_body(commands, command_type) - data = module.jsonify(data) - - headers = {'Content-Type': 'text/json'} - - module.params['url_username'] = module.params['username'] - module.params['url_password'] = module.params['password'] - - response, headers = fetch_url(module, url, data=data, headers=headers, - method='POST') - - status = kwargs.get('status') or 200 - if headers['status'] != status: - module.fail_json(**headers) - - response = module.from_json(response.read()) - return response, headers - From 766738ef7cdc9c593ae8cfd7bf6458bef9b68969 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Sun, 10 Jan 2016 22:37:20 +0100 Subject: [PATCH 0326/1113] cloudstack: fix case insensitivity cloudstack: 
fix has_change reports changed for case insensitivity values --- lib/ansible/module_utils/cloudstack.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 53d142847d6..d9b29fefe7a 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -78,6 +78,10 @@ class AnsibleCloudStack(object): self.returns = {} # these values will be casted to int self.returns_to_int = {} + # these keys will be compared case sensitive in self.has_changed() + self.case_sensitive_keys = [ + 'id', + ] self.module = module self._connect() @@ -138,16 +142,14 @@ class AnsibleCloudStack(object): continue if key in current_dict: - - # API returns string for int in some cases, just to make sure - if isinstance(value, int): - current_dict[key] = int(current_dict[key]) - elif isinstance(value, str): - current_dict[key] = str(current_dict[key]) - - # Only need to detect a singe change, not every item - if value != current_dict[key]: + if self.case_sensitive_keys and key in self.case_sensitive_keys: + if str(value) != str(current_dict[key]): + return True + # Test for diff in case insensitive way + elif str(value).lower() != str(current_dict[key]).lower(): return True + else: + return True return False @@ -218,7 +220,7 @@ class AnsibleCloudStack(object): vms = self.cs.listVirtualMachines(**args) if vms: for v in vms['virtualmachine']: - if vm in [ v['name'], v['displayname'], v['id'] ]: + if vm.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]: self.vm = v return self._get_by_key(key, self.vm) self.module.fail_json(msg="Virtual machine '%s' not found" % vm) @@ -238,7 +240,7 @@ class AnsibleCloudStack(object): if zones: for z in zones['zone']: - if zone in [ z['name'], z['id'] ]: + if zone.lower() in [ z['name'].lower(), z['id'] ]: self.zone = z return self._get_by_key(key, self.zone) self.module.fail_json(msg="zone '%s' not found" % zone) From c4cbeeffa89a6265483c187f493bc90ef13bbac9 Mon Sep 17 00:00:00 2001 From: Karthik T <karthikt.holmes+github@gmail.com> Date: Mon, 11 Jan 2016 16:30:52 +0800 Subject: [PATCH 0327/1113] Fixes #13763 Update connections _play_context on every iteration If this isnt updated, the _connection is reused, and thus has an outdated _play_context This results in outdated `success_key` and `prompt` causing issues if sudo is run in a loop Refer to the issue #13763 for more debugging and details --- lib/ansible/executor/task_executor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4a2d30a2cd2..9b7ac8b1563 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -365,6 +365,9 @@ class TaskExecutor: if not self._connection or not getattr(self._connection, 'connected', False): self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides(host=self._host) + #If connection is reused, its _play_context is no longer valid and needs to be replaced + #This fixes issues with tasks running sudo in a loop and having the success_key incorrect in the second iteration + self._connection._play_context = self._play_context self._handler = self._get_action_handler(connection=self._connection, templar=templar) From e01ff3b35261e52f50b1d84de8ec9e2790427a8f Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Mon, 11 Jan 2016 14:14:20 +0100 
Subject: [PATCH 0328/1113] remove unused imports clean up imports of time.sleep and stat that are no (longer) used here --- lib/ansible/utils/path.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py index ffac578243d..d8dc4234265 100644 --- a/lib/ansible/utils/path.py +++ b/lib/ansible/utils/path.py @@ -18,8 +18,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os -import stat -from time import sleep from errno import EEXIST __all__ = ['unfrackpath'] From 7124a525a2e030451307d61e400c335170634a86 Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Mon, 11 Jan 2016 08:54:49 -0500 Subject: [PATCH 0329/1113] added custom code for swiftype custom search Google custom search replacement --- docsite/_themes/srtd/searchbox.html | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/docsite/_themes/srtd/searchbox.html b/docsite/_themes/srtd/searchbox.html index b0310cff989..b729e8cd64d 100644 --- a/docsite/_themes/srtd/searchbox.html +++ b/docsite/_themes/srtd/searchbox.html @@ -4,7 +4,21 @@ <input type="hidden" name="area" value="default" /> </form> --> -<script> +<script type="text/javascript"> + (function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){ + (w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t); + e=d.getElementsByTagName(t)[0];s.async=1;s.src=u;e.parentNode.insertBefore(s,e); + })(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st'); + + _st('install','AG-cRmit6D-ZGiWz61k_','2.0.0'); +</script> + +<!-- the above was added by swills/docschick as the new custom search engine, Swiftype, while the original CGS content below is commented out, + this can only be tested once built (I can't see it locally) and I didn't want to remove the old content until this was built/tested 7-Jan-16) --> + + + +<!-- <script> (function() { var cx = '006019874985968165468:eu5pbnxp4po'; var gcse = document.createElement('script'); @@ -59,3 +73,4 @@ e.preventDefault(); }); </script> +--> \ No newline at end of file From 8ff47d63e1aa32492b2192bc922f5371d315b4e9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 11 Jan 2016 11:26:39 -0500 Subject: [PATCH 0330/1113] removed unused files to avoid confusion really, its a vcs, no need to rename to _old and keep stuff around --- docsite/_themes/srtd/layout_old.html | 205 --------------------------- 1 file changed, 205 deletions(-) delete mode 100644 docsite/_themes/srtd/layout_old.html diff --git a/docsite/_themes/srtd/layout_old.html b/docsite/_themes/srtd/layout_old.html deleted file mode 100644 index deb8df2a1a7..00000000000 --- a/docsite/_themes/srtd/layout_old.html +++ /dev/null @@ -1,205 +0,0 @@ -{# - basic/layout.html - ~~~~~~~~~~~~~~~~~ - - Master layout template for Sphinx themes. - - :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -#} -{%- block doctype -%} -<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" - "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> -{%- endblock %} -{%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %} -{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %} -{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and - (sidebars != []) %} -{%- set url_root = pathto('', 1) %} -{# XXX necessary? 
#} -{%- if url_root == '#' %}{% set url_root = '' %}{% endif %} -{%- if not embedded and docstitle %} - {%- set titlesuffix = " — "|safe + docstitle|e %} -{%- else %} - {%- set titlesuffix = "" %} -{%- endif %} - -{%- macro relbar() %} - <div class="related"> - <h3>{{ _('Navigation') }}</h3> - <ul> - {%- for rellink in rellinks %} - <li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}> - <a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}" - {{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a> - {%- if not loop.first %}{{ reldelim2 }}{% endif %}</li> - {%- endfor %} - {%- block rootrellink %} - <li><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li> - {%- endblock %} - {%- for parent in parents %} - <li><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li> - {%- endfor %} - {%- block relbaritems %} {% endblock %} - </ul> - </div> -{%- endmacro %} - -{%- macro sidebar() %} - {%- if render_sidebar %} - <div class="sphinxsidebar"> - <div class="sphinxsidebarwrapper"> - {%- block sidebarlogo %} - {%- if logo %} - <p class="logo"><a href="{{ pathto(master_doc) }}"> - <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/> - </a></p> - {%- endif %} - {%- endblock %} - {%- if sidebars != None %} - {#- new style sidebar: explicitly include/exclude templates #} - {%- for sidebartemplate in sidebars %} - {%- include sidebartemplate %} - {%- endfor %} - {%- else %} - {#- old style sidebars: using blocks -- should be deprecated #} - {%- block sidebartoc %} - {%- include "localtoc.html" %} - {%- endblock %} - {%- block sidebarrel %} - {%- include "relations.html" %} - {%- endblock %} - {%- block sidebarsourcelink %} - {%- include "sourcelink.html" %} - {%- endblock %} - {%- if customsidebar %} - {%- include customsidebar %} - {%- endif %} - {%- block sidebarsearch %} - {%- include "searchbox.html" %} - {%- endblock %} - {%- endif %} - </div> - </div> - {%- endif %} -{%- endmacro %} - -{%- macro script() %} - <script type="text/javascript"> - var DOCUMENTATION_OPTIONS = { - URL_ROOT: '{{ url_root }}', - VERSION: '{{ release|e }}', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}', - HAS_SOURCE: {{ has_source|lower }} - }; - </script> - {%- for scriptfile in script_files %} - <script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script> - {%- endfor %} -{%- endmacro %} - -{%- macro css() %} - <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" /> - <link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" /> - {%- for cssfile in css_files %} - <link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" /> - {%- endfor %} -{%- endmacro %} - -<html xmlns="http://www.w3.org/1999/xhtml"> - <head> - <meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" /> - {{ metatags }} - {%- block htmltitle %} - <title>{{ title|striptags|e }}{{ titlesuffix }}</title> - {%- endblock %} - {{ css() }} - {%- if not embedded %} - {{ script() }} - {%- if use_opensearch %} - <link rel="search" type="application/opensearchdescription+xml" - title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}" - href="{{ pathto('_static/opensearch.xml', 1) }}"/> - {%- endif %} - {%- if favicon %} - <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/> - {%- endif %} - {%- endif %} -{%- block linktags %} - {%- if 
hasdoc('about') %} - <link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" /> - {%- endif %} - {%- if hasdoc('genindex') %} - <link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" /> - {%- endif %} - {%- if hasdoc('search') %} - <link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" /> - {%- endif %} - {%- if hasdoc('copyright') %} - <link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" /> - {%- endif %} - <link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}" /> - {%- if parents %} - <link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}" /> - {%- endif %} - {%- if next %} - <link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" /> - {%- endif %} - {%- if prev %} - <link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" /> - {%- endif %} -{%- endblock %} -{%- block extrahead %} {% endblock %} - </head> - <body> -{%- block header %}{% endblock %} - -{%- block relbar1 %}{{ relbar() }}{% endblock %} - -{%- block content %} - {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %} - - <div class="document"> - {%- block document %} - <div class="documentwrapper"> - {%- if render_sidebar %} - <div class="bodywrapper"> - {%- endif %} - <div class="body"> - {% block body %} {% endblock %} - </div> - {%- if render_sidebar %} - </div> - {%- endif %} - </div> - {%- endblock %} - - {%- block sidebar2 %}{{ sidebar() }}{% endblock %} - <div class="clearer"></div> - </div> -{%- endblock %} - -{%- block relbar2 %}{{ relbar() }}{% endblock %} - -{%- block footer %} - <div class="footer"> - {%- if show_copyright %} - {%- if hasdoc('copyright') %} - {% trans path=pathto('copyright'), copyright=copyright|e %}© <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %} - {%- else %} - {% trans copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %} - {%- endif %} - {%- endif %} - {%- if last_updated %} - {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} - {%- endif %} - {%- if show_sphinx %} - {% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %} - {%- endif %} - </div> - <p>asdf asdf asdf asdf 22</p> -{%- endblock %} - </body> -</html> - From 45e686d03daee074d96a91cfdf5d9a6811c3721f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 11 Jan 2016 11:28:23 -0500 Subject: [PATCH 0331/1113] another unused file --- docsite/_themes/srtd/searchbox.html | 76 ----------------------------- 1 file changed, 76 deletions(-) delete mode 100644 docsite/_themes/srtd/searchbox.html diff --git a/docsite/_themes/srtd/searchbox.html b/docsite/_themes/srtd/searchbox.html deleted file mode 100644 index b729e8cd64d..00000000000 --- a/docsite/_themes/srtd/searchbox.html +++ /dev/null @@ -1,76 +0,0 @@ -<!-- <form class="wy-form" action="{{ pathto('search') }}" method="get"> - <input type="text" name="q" placeholder="Search docs" /> - <input type="hidden" name="check_keywords" value="yes" /> - <input type="hidden" name="area" value="default" /> -</form> --> - -<script type="text/javascript"> - (function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){ - (w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t); - e=d.getElementsByTagName(t)[0];s.async=1;s.src=u;e.parentNode.insertBefore(s,e); - 
})(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st'); - - _st('install','AG-cRmit6D-ZGiWz61k_','2.0.0'); -</script> - -<!-- the above was added by swills/docschick as the new custom search engine, Swiftype, while the original CGS content below is commented out, - this can only be tested once built (I can't see it locally) and I didn't want to remove the old content until this was built/tested 7-Jan-16) --> - - - -<!-- <script> - (function() { - var cx = '006019874985968165468:eu5pbnxp4po'; - var gcse = document.createElement('script'); - gcse.type = 'text/javascript'; - gcse.async = true; - gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') + - '//www.google.com/cse/cse.js?cx=' + cx; - var s = document.getElementsByTagName('script')[0]; - s.parentNode.insertBefore(gcse, s); - })(); -</script> - -<form id="search-form-id" action=""> - <input type="text" name="query" id="search-box-id" /> - <a class="search-reset-start" id="search-reset"><i class="fa fa-times"></i></a> - <a class="search-reset-start" id="search-start"><i class="fa fa-search"></i></a> -</form> - -<script type="text/javascript" src="http://www.google.com/cse/brand?form=search-form-id&inputbox=search-box-id"></script> - -<script> - function executeQuery() { - var input = document.getElementById('search-box-id'); - var element = google.search.cse.element.getElement('searchresults-only0'); - element.resultsUrl = '/htmlout/search.html' - if (input.value == '') { - element.clearAllResults(); - $('#page-content, .rst-footer-buttons, #search-start').show(); - $('#search-results, #search-reset').hide(); - } else { - $('#page-content, .rst-footer-buttons, #search-start').hide(); - $('#search-results, #search-reset').show(); - element.execute(input.value); - } - return false; - } - - $('#search-reset').hide(); - - $('#search-box-id').css('background-position', '1em center'); - - $('#search-box-id').on('blur', function() { - $('#search-box-id').css('background-position', '1em center'); - }); - - $('#search-start').click(function(e) { executeQuery(); }); - $('#search-reset').click(function(e) { $('#search-box-id').val(''); executeQuery(); }); - - $('#search-form-id').submit(function(e) { - console.log('submitting!'); - executeQuery(); - e.preventDefault(); - }); -</script> ---> \ No newline at end of file From 9601ae60fdf1c62330a30eeec00db7c0fab6060d Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Mon, 11 Jan 2016 11:22:42 -0500 Subject: [PATCH 0332/1113] adding swiftype search script to footer.html --- docsite/_themes/srtd/footer.html | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index 30b02a8978b..dc1d70a4d1f 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -12,6 +12,15 @@ <hr/> +<script type="text/javascript"> + (function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){ + (w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t); + e=d.getElementsByTagName(t)[0];s.async=1;s.src=u;e.parentNode.insertBefore(s,e); + })(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st'); + + _st('install','yABGvz2N8PwcwBxyfzUc','2.0.0'); +</script> + <p> © Copyright 2016 <a href="http://ansible.com">Ansible, Inc.</a>. 
From deeb3381f772f96806ff85c2976ab539d1e22d72 Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Mon, 11 Jan 2016 10:58:31 -0500 Subject: [PATCH 0333/1113] adding sitemap for swiftype to core --- ansible-core-sitemap.xml | 2716 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 2716 insertions(+) create mode 100644 ansible-core-sitemap.xml diff --git a/ansible-core-sitemap.xml b/ansible-core-sitemap.xml new file mode 100644 index 00000000000..84a048d3116 --- /dev/null +++ b/ansible-core-sitemap.xml @@ -0,0 +1,2716 @@ +<?xml version="1.0" encoding="UTF-8"?> +<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> +<!-- created with Integrity from http://peacockmedia.software --> + + <url> + <loc>http://docs.ansible.com/ansible/</loc> + <changefreq>weekly</changefreq> + <priority>1.0</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_patterns.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_adhoc.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_configuration.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_getting_started.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_inventory.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_installation.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_bsd.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_dynamic_inventory.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/intro_windows.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_filters.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_conditionals.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quickstart.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_loops.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_variables.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_roles.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_intro.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_blocks.html</loc> + <changefreq>weekly</changefreq> + 
<priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_async.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_checkmode.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/become.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_acceleration.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_best_practices.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_delegation.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_special_topics.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_strategies.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_environment.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_error_handling.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_prompts.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modules_intro.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_tags.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_lookups.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_vault.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_startnstep.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modules_core.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modules_extra.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_commands_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/common_return_values.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modules_by_category.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_cloud_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_all_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + 
</url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_clustering_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_database_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_files_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_inventory_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_source_control_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_system_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_utilities_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_monitoring_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_notification_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_messaging_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_network_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_packaging_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_web_infrastructure_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_cloudstack.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_vagrant.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guides.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_gce.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/list_of_windows_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_aws.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_rax.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/guide_rolling_upgrade.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_releases.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/tower.html</loc> + <changefreq>weekly</changefreq> + 
<priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_inventory.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_test_pr.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_plugins.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/community.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_api.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/developing_modules.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/test_strategies.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/glossary.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/galaxy.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/faq.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/YAMLSyntax.html</loc> + <changefreq>weekly</changefreq> + <priority>0.5</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/index.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/command_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/shell_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/playbooks_filters_ipaddr.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/expect_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/script_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/raw_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/znode_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/xenserver_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cloudtrail_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cloudformation_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/dynamodb_table_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_ami_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_ami_copy_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_elb_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_ami_find_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_eip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_elb_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_asg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_eni_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_elb_lb_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_eni_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_key_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_lc_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_tag_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_scaling_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_metric_alarm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_snapshot_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_remote_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vol_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_igw_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_subnet_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_net_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_net_module.html</loc> + <changefreq>weekly</changefreq> + 
<priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_route_table_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_win_password_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_route_table_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_vpc_subnet_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ecs_cluster_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/iam_cert_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ecs_taskdefinition_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ecs_task_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/elasticache_subnet_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/iam_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/elasticache_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/iam_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rds_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/route53_zone_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rds_subnet_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/route53_health_check_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/route53_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rds_param_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/route53_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/s3_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sts_assume_role_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/s3_bucket_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/s3_lifecycle_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sns_topic_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/s3_logging_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sqs_queue_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/azure_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_aa_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_modify_server_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_alert_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_publicip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_firewall_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_blueprint_package_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_loadbalancer_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_server_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_firewall_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_instance_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/clc_server_snapshot_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_affinitygroup_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_domain_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_account_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_instancegroup_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_iso_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_project_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_ip_address_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/cs_securitygroup_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_loadbalancer_rule_member_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_loadbalancer_rule_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_network_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_portforward_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_securitygroup_rule_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_sshkeypair_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_template_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_staticnat_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/digital_ocean_domain_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_vmsnapshot_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/digital_ocean_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_volume_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cs_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/docker_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/digital_ocean_sshkey_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/docker_login_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gce_net_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gce_pd_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gc_storage_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/docker_image_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gce_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gce_lb_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gce_img_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + 
<url> + <loc>http://docs.ansible.com/ansible/gce_tag_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/linode_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/virt_net_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/virt_pool_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_auth_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ovirt_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/lxc_container_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/virt_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/proxmox_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/proxmox_template_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_client_config_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_floating_ip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_network_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_networks_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_image_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_ironic_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_image_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_ironic_node_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_keypair_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_nova_flavor_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_security_group_rule_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_server_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_server_actions_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_object_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/os_project_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_security_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_server_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_router_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_port_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_server_volume_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/profitbricks_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/profitbricks_datacenter_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_subnets_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_subnet_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_volume_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/os_user_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/profitbricks_nic_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_cdb_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_cdb_database_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/profitbricks_volume_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/profitbricks_volume_attachments_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_cbs_attachments_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_cdb_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_cbs_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_files_module.html</loc> + <changefreq>weekly</changefreq> + 
<priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_files_objects_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_clb_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_dns_record_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_clb_nodes_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_dns_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_identity_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_mon_entity_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_mon_notification_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_mon_notification_plan_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_clb_ssl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_meta_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_keypair_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_mon_check_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_network_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_mon_alarm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_queue_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_scaling_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vca_vapp_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vca_nat_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rax_scaling_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vca_fw_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_dvswitch_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_cluster_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_host_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_dns_config_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_datacenter_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_dvs_host_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_dvs_portgroup_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_target_canonical_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_migrate_vmk_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vmkernel_ip_config_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vswitch_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vsphere_copy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vm_shell_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vsan_cluster_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vmkernel_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vmware_vm_vss_dvs_migrate_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vsphere_guest_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zypper_repository_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/a10_server_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/a10_service_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/webfaction_db_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/webfaction_domain_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/a10_virtual_server_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/webfaction_app_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/webfaction_site_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/webfaction_mailbox_module.html</loc> + <changefreq>weekly</changefreq> + 
<priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/accelerate_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apache2_module_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apt_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/acl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/alternatives_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/airbrake_deployment_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/add_host_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apt_key_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apk_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/at_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/authorized_key_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apt_repository_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/assemble_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/apt_rpm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/assert_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/async_status_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_gtm_wide_ip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_pool_member_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_monitor_http_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigpanda_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bower_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_monitor_tcp_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bigip_node_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/bigip_pool_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bundler_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/boundary_meter_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/blockinfile_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/consul_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/consul_acl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/consul_kv_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/campfire_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/composer_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/bzr_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/circonus_annotation_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/capabilities_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/consul_session_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/datadog_event_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/datadog_monitor_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/debconf_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/copy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cronvar_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cpanm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/cron_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/debug_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/crypttab_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/deploy_helper_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/django_manage_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/dnf_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/dpkg_selections_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/dnsmadeeasy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/dnsimple_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/fail_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/easy_install_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/fetch_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ec2_ami_search_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ejabberd_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/filesystem_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/facter_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/elasticsearch_plugin_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/firewalld_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/file_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/get_url_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/find_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/flowdock_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/git_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/fireball_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gem_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/getent_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/gluster_volume_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/github_hooks_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/hall_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/hg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/glance_image_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/hipchat_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/grove_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/group_by_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/haproxy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/homebrew_cask_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/homebrew_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ini_file_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/homebrew_tap_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/irc_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/hostname_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/htpasswd_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/include_vars_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ipify_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/jabber_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/iptables_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/known_hosts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/jboss_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/librato_annotation_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/kernel_blacklist_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/keystone_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/jira_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/layman_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/lvol_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/lineinfile_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/macports_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/logentries_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/locale_gen_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/lvg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/maven_artifact_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/lldp_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mail_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mqtt_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/modprobe_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mount_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mongodb_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mysql_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/monit_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mysql_db_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mysql_replication_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/mysql_variables_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/nmcli_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/netscaler_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/nagios_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/newrelic_deployment_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/nexmo_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/nova_compute_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/openvswitch_bridge_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + 
<url> + <loc>http://docs.ansible.com/ansible/openvswitch_db_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/openvswitch_port_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ohai_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/npm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/open_iscsi_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/openbsd_pkg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/opkg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/nova_keypair_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pagerduty_alert_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pam_limits_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/patch_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/osx_say_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pacman_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/osx_defaults_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/package_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pagerduty_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pause_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pkg5_publisher_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pear_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pkgin_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pingdom_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pkg5_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ping_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pkgutil_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/postgresql_lang_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pkgng_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/portage_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/postgresql_privs_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/portinstall_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pushbullet_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/postgresql_db_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/postgresql_ext_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/puppet_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/postgresql_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_network_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/pushover_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_router_interface_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_floating_ip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_floating_ip_associate_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_binding_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_router_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_router_gateway_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_policy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/quantum_subnet_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_vhost_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_plugin_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_exchange_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/rabbitmq_queue_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_parameter_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/redhat_subscription_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rabbitmq_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/riak_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rpm_key_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/seboolean_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rollbar_deployment_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/redis_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rhn_channel_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/rhn_register_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/replace_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/selinux_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/set_fact_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/setup_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/slack_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sendgrid_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/seport_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/slackpkg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sensu_check_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/service_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/selinux_permissive_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/stat_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/subversion_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/slurp_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/supervisorctl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/solaris_zone_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sns_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/stackdriver_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/svr4pkg_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/snmp_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/svc_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/twilio_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/swdepot_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/unarchive_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/template_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/sysctl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/synchronize_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/uptimerobot_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/typetalk_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/ufw_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/uri_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vertica_role_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/urpmi_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vertica_facts_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vertica_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vertica_configuration_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/vertica_schema_module.html</loc> + 
<changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_dotnet_ngen_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/wait_for_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_feature_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_copy_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_chocolatey_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_firewall_rule_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_environment_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_acl_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_file_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_get_url_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_iis_webbinding_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_lineinfile_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_msi_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_iis_webapplication_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_iis_website_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_iis_virtualdirectory_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_iis_webapppool_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_nssm_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_stat_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_template_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_unzip_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_ping_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + 
<loc>http://docs.ansible.com/ansible/win_package_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_regedit_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_updates_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_service_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_scheduled_task_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_user_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zabbix_group_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zabbix_hostmacro_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/win_webpicmd_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/xattr_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/yum_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/yumrepo_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zabbix_host_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zabbix_maintenance_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zypper_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zabbix_screen_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + <url> + <loc>http://docs.ansible.com/ansible/zfs_module.html</loc> + <changefreq>weekly</changefreq> + <priority>0.3</priority> + </url> + +</urlset> \ No newline at end of file From 51255aed9b6a5b6cfdd229456d3ab41bfcb073f8 Mon Sep 17 00:00:00 2001 From: "James E. Blair" <jeblair@linux.vnet.ibm.com> Date: Mon, 11 Jan 2016 09:48:25 -0800 Subject: [PATCH 0334/1113] Fix typo in API docs This typo caused a SyntaxError when attempting to run the example in the API docs. 
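[Editorial note] The error fixed here is purely syntactic: in the broken form, ``args=(msg='Hello Galaxy!')`` places a keyword argument inside bare parentheses, which Python rejects with a SyntaxError before the play can even be built. Below is a minimal sketch of the corrected ``play_source`` from that docs example, reconstructed from the hunk context that follows; the surrounding ``variable_manager``, ``loader``, and ``Play().load(...)`` pieces of the full example are assumed and not reproduced here.

```python
# Minimal sketch (assumed context: variable_manager and loader are created as
# in the full developing_api.rst example, which is not reproduced here).
play_source = dict(
    name="Ansible Play",
    hosts='localhost',
    gather_facts='no',
    # args must be a dict; args=(msg='Hello Galaxy!') is a keyword argument
    # inside plain parentheses and raises SyntaxError at parse time.
    tasks=[dict(action=dict(module='debug', args=dict(msg='Hello Galaxy!')))],
)
```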
--- docsite/rst/developing_api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 319417672e0..99dea5d4afa 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -53,7 +53,7 @@ In 2.0 things get a bit more complicated to start, but you end up with much more name = "Ansible Play", hosts = 'localhost', gather_facts = 'no', - tasks = [ dict(action=dict(module='debug', args=(msg='Hello Galaxy!'))) ] + tasks = [ dict(action=dict(module='debug', args=dict(msg='Hello Galaxy!'))) ] ) play = Play().load(play_source, variable_manager=variable_manager, loader=loader) From 6ec4d98764b14cffeb7f8b43c01e015b67237753 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 11 Jan 2016 12:50:04 -0500 Subject: [PATCH 0335/1113] When setting fail state skip RESCUE/ALWAYS if cur_block doesn't have them Fixes #13749 --- lib/ansible/executor/play_iterator.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 147e46e5aa7..3de07ec70c8 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -342,13 +342,21 @@ class PlayIterator: state.tasks_child_state = self._set_failed_state(state.tasks_child_state) else: state.fail_state |= self.FAILED_TASKS - state.run_state = self.ITERATING_RESCUE + if state._blocks[state.cur_block].rescue: + state.run_state = self.ITERATING_RESCUE + elif state._blocks[state.cur_block].always: + state.run_state = self.ITERATING_ALWAYS + else: + state.run_state = self.ITERATING_COMPLETE elif state.run_state == self.ITERATING_RESCUE: if state.rescue_child_state is not None: state.rescue_child_state = self._set_failed_state(state.rescue_child_state) else: state.fail_state |= self.FAILED_RESCUE - state.run_state = self.ITERATING_ALWAYS + if state._blocks[state.cur_block].always: + state.run_state = self.ITERATING_ALWAYS + else: + state.run_state = self.ITERATING_COMPLETE elif state.run_state == self.ITERATING_ALWAYS: if state.always_child_state is not None: state.always_child_state = self._set_failed_state(state.always_child_state) From 45d9cfcc6fded4c17a28fe77d5f04c173a396332 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 11 Jan 2016 11:55:25 -0600 Subject: [PATCH 0336/1113] Coalesce forms of ssh_args in order of most specific to least --- lib/ansible/plugins/connection/paramiko_ssh.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 21dfe0c7bc3..dfac62b7a83 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -144,9 +144,13 @@ class Connection(ConnectionBase): def _parse_proxy_command(self, port=22): proxy_command = None # Parse ansible_ssh_common_args, specifically looking for ProxyCommand - ssh_common_args = getattr(self._play_context, 'ssh_common_args', None) + ssh_args = [ + getattr(self._play_context, 'ssh_extra_args', ''), + getattr(self._play_context, 'ssh_common_args', ''), + getattr(self._play_context, 'ssh_args', ''), + ] if ssh_common_args is not None: - args = self._split_ssh_args(ssh_common_args) + args = self._split_ssh_args(' '.join(ssh_args)) for i, arg in enumerate(args): if arg.lower() == 'proxycommand': # _split_ssh_args split ProxyCommand from the command itself From 
76ac6294e0d49d6de9b908e87f7507796254ffda Mon Sep 17 00:00:00 2001 From: Tom Paine <aioue@users.noreply.github.com> Date: Mon, 11 Jan 2016 18:21:28 +0000 Subject: [PATCH 0337/1113] profile_tasks README.md As agreed with Brian Coca in Ansible Project group: https://groups.google.com/forum/#!searchin/ansible-project/tom$20paine/ansible-project/__nv6BZs2yU/AkYQ0HU-BQAJ --- lib/ansible/plugins/callback/profile_tasks.md | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 lib/ansible/plugins/callback/profile_tasks.md diff --git a/lib/ansible/plugins/callback/profile_tasks.md b/lib/ansible/plugins/callback/profile_tasks.md new file mode 100644 index 00000000000..b52081d83bf --- /dev/null +++ b/lib/ansible/plugins/callback/profile_tasks.md @@ -0,0 +1,58 @@ +# profile_tasks.py +Ansible plugin for timing individual tasks and overall execution time. + +Mashup of 2 excellent original works: +- (https://github.com/jlafon/ansible-profile) +- (https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old) + +## Usage + +Add `profile_tasks` to the `callback_whitelist` in `ansible.cfg`. + +Run playbooks as normal. + +## Features + +### Tasks + +Ongoing timing of each task as it happens. + +Format: +`<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>` + +```shell +TASK: [ensure messaging security group exists] ******************************** +Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 ********* +ok: [localhost] + +TASK: [ensure db security group exists] *************************************** +Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 ********* +changed: [localhost] +``` + +### Play Recap + +Recap includes ending timestamp, total playbook execution time and a sorted list of the top longest running tasks. + +No more wondering how old the results in a terminal window are. + +```shell + ansible <args here> + <normal output here> + PLAY RECAP ******************************************************************** + Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 ********* + =============================================================================== + really slow task | Download project packages----------------------------11.61s + security | Really slow security policies----------------------------------7.03s + common-base | Install core system dependencies----------------------------3.62s + common | Install pip------------------------------------------------------3.60s + common | Install boto-----------------------------------------------------3.57s + nginx | Install nginx-----------------------------------------------------3.41s + serf | Install system dependencies----------------------------------------3.38s + duo_security | Install Duo Unix SSH Integration---------------------------3.37s + loggly | Install TLS version----------------------------------------------3.36s +``` + +## Compatibility + +Ansible 2.0+ From cacc22abbb05bf99308e05b416cc557028801f36 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 11 Jan 2016 13:57:45 -0800 Subject: [PATCH 0338/1113] Add the porting guide --- docsite/rst/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 936a485c9e4..fcf350dc3b0 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -40,4 +40,5 @@ Ansible, Inc.
releases a new major release of Ansible approximately every two mo faq glossary YAMLSyntax + porting_guide From 7ed4ad23a56733796bb361d803d232196b7bc199 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Mon, 11 Jan 2016 16:24:30 -0800 Subject: [PATCH 0339/1113] updated new windows module list in changelog --- CHANGELOG.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17180993a2f..9b5b4ac0be7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -295,17 +295,22 @@ allowed in future versions: * webfaction_mailbox * webfaction_site * win_acl +* win_dotnet_ngen * win_environment * win_firewall_rule -* win_package -* win_scheduled_task * win_iis_virtualdirectory * win_iis_webapplication * win_iis_webapppool * win_iis_webbinding * win_iis_website +* win_lineinfile +* win_nssm +* win_package * win_regedit +* win_scheduled_task * win_unzip +* win_updates +* win_webpicmd * xenserver_facts * zabbix_host * zabbix_hostmacro From 477d63211871e3be6140c622dbbf7c3a010fddfe Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 11 Jan 2016 19:47:47 -0500 Subject: [PATCH 0340/1113] now file mode diff shows octal values --- lib/ansible/module_utils/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 1366bfceb40..ede8240330e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -881,10 +881,10 @@ class AnsibleModule(object): if diff is not None: if 'before' not in diff: diff['before'] = {} - diff['before']['mode'] = prev_mode + diff['before']['mode'] = oct(prev_mode) if 'after' not in diff: diff['after'] = {} - diff['after']['mode'] = mode + diff['after']['mode'] = oct(mode) if self.check_mode: return True From d274fea3583517de7f8ef24e7f4f4b4924bea86e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 11 Jan 2016 17:15:34 -0800 Subject: [PATCH 0341/1113] Update submodules ref to pick up fix #558 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 33014c6db1c..b77bf7a9aa1 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 33014c6db1ce757d0ffa065e6c9924ac4db1cacc +Subproject commit b77bf7a9aa1889fbee75f3db17c89816ca7c7838 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 82a4cf84be8..4d2a20b2f43 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 82a4cf84be82244d0cf7d043c8cbb4f176f086db +Subproject commit 4d2a20b2f433db2492b6b3eb0554177fa42662e4 From bcb22b0e1a208ce6c978f893f7a757129c61ad5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 11 Jan 2016 17:16:08 -0800 Subject: [PATCH 0342/1113] Integration tests for https://github.com/ansible/ansible-modules-core/pull/558 --- .../integration/roles/test_git/tasks/main.yml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 46f6e078ee1..49f5f53bfb8 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -386,3 +386,29 @@ - name: assert presence of new file in repo (i.e. 
working copy updated) assert: that: "repo_content.stat.exists" + +# Test that checkout by branch works when the branch is not in our current repo but the sha is + +- name: clear checkout_dir + file: state=absent path={{ checkout_dir }} + +- name: Clone example git repo that we're going to modify + git: + repo: '{{ repo_update_url_1 }}' + dest: '{{ checkout_dir }}/repo' + +- name: Clone the repo again - this is what we test + git: + repo: '{{ checkout_dir }}/repo' + dest: '{{ checkout_dir }}/checkout' + +- name: Add a branch to the repo + command: git branch new-branch + args: + chdir: '{{ checkout_dir }}/repo' + +- name: Checkout the new branch in the checkout + git: + repo: '{{ checkout_dir}}/repo' + version: 'new-branch' + dest: '{{ checkout_dir }}/checkout' From 935ae2801f974b37a10d3d9ec395606c795e9023 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 11 Jan 2016 22:40:42 -0500 Subject: [PATCH 0343/1113] bugfix in eos shared module for including defaults --- lib/ansible/module_utils/eos.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index e3782a9d097..d4656b11915 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -179,13 +179,13 @@ class EosModule(AnsibleModule): def get_config(self): cmd = 'show running-config' - if self.params['include_defaults']: + if self.params.get('include_defaults'): cmd += ' all' if self.params['transport'] == 'cli': return self.execute(cmd)[0] else: resp = self.execute(cmd, encoding='text') - return resp[0]['output'] + return resp[0] def get_module(**kwargs): From a758806287e40a1eb591cc6f29d5f936f745d390 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 11 Jan 2016 22:40:54 -0500 Subject: [PATCH 0344/1113] bugfix in ios shared module for including defaults --- lib/ansible/module_utils/ios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index 550a2de6d53..f6d6037f808 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -102,7 +102,7 @@ class IosModule(AnsibleModule): def get_config(self): cmd = 'show running-config' - if self.params['include_defaults']: + if self.params.get('include_defaults'): cmd += ' all' return self.execute(cmd)[0] From 4d3aa721439f98ff4a41579878e87aeb5eaf97f2 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 11 Jan 2016 22:41:06 -0500 Subject: [PATCH 0345/1113] bugfix in nxos shared module for including defaults --- lib/ansible/module_utils/nxos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index 5bde5cccd1d..f75ac5123fc 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -180,7 +180,7 @@ class NxosModule(AnsibleModule): def get_config(self): cmd = 'show running-config' - if self.params['include_defaults']: + if self.params.get('include_defaults'): cmd += ' all' if self.params['transport'] == 'cli': return self.execute(cmd)[0] From b58673289ae5e23e36acd44fa9374229f5d66761 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 12 Jan 2016 08:14:21 -0500 Subject: [PATCH 0346/1113] Split up comma-separated tags properly Fixes #13795 --- lib/ansible/playbook/taggable.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 8f5cfa09344..54ca377d083 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -38,7 +38,11 @@ class Taggable: if isinstance(ds, list): return ds elif isinstance(ds, basestring): - return [ ds ] + value = ds.split(',') + if isinstance(value, list): + return [ x.strip() for x in value ] + else: + return [ ds ] else: raise AnsibleError('tags must be specified as a list', obj=ds) From 2c33f5a12b8fa97291147e7937d294951afc3ec9 Mon Sep 17 00:00:00 2001 From: sebastianneubauer <sebineubauer@gmail.com> Date: Tue, 12 Jan 2016 16:22:01 +0100 Subject: [PATCH 0347/1113] added galaxy data not tested, but something like this seems to be missing in the Manifest.in --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 64c5bf1fcba..a5e29c9a433 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,6 +9,7 @@ include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * +recursive-include lib/ansible/galaxy/data * recursive-include docs * recursive-include packaging * include Makefile From c91df36ebe32cfb23c539e3608df42fdc1ef6086 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 11:24:43 -0500 Subject: [PATCH 0348/1113] added module name to missing interpreter error fixes #13816 --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 6c65716bbf8..c1fcfe670b2 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -386,7 +386,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars) if not shebang: - raise AnsibleError("module is missing interpreter line") + raise AnsibleError("module (%s) is missing interpreter line" % module_name) # a remote tmp path may be necessary and not already created remote_module_path = None From 9f9bff88c2baa9f8e553015bf40817f8381fb2db Mon Sep 17 00:00:00 2001 From: Charles Paul <cpaul@ansible.com> Date: Tue, 12 Jan 2016 11:06:22 -0600 Subject: [PATCH 0349/1113] doc fragments for vmware_ vca_ --- .../utils/module_docs_fragments/vca.py | 83 +++++++++++++++++++ .../utils/module_docs_fragments/vmware.py | 37 +++++++++ 2 files changed, 120 insertions(+) create mode 100644 lib/ansible/utils/module_docs_fragments/vca.py create mode 100644 lib/ansible/utils/module_docs_fragments/vmware.py diff --git a/lib/ansible/utils/module_docs_fragments/vca.py b/lib/ansible/utils/module_docs_fragments/vca.py new file mode 100644 index 00000000000..88cb1b41846 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/vca.py @@ -0,0 +1,83 @@ +# (c) 2016, Charles Paul <cpaul@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +class ModuleDocFragment(object): + # Parameters for VCA modules + DOCUMENTATION = """ +options: + username: + description: + - The vca username or email address, if not set the environment variable VCA_USER is checked for the username. + required: false + default: None + aliases: ['user'] + password: + description: + - The vca password, if not set the environment variable VCA_PASS is checked for the password + required: false + default: None + aliases: ['pass', 'pwd'] + org: + description: + - The org to login to for creating vapp, mostly set when the service_type is vdc. + required: false + default: None + instance_id: + description: + - The instance id in a vchs environment to be used for creating the vapp + required: false + default: None + host: + description: + - The authentication host to be used when service type is vcd. + required: false + default: None + api_version: + description: + - The api version to be used with the vca + required: false + default: "5.7" + service_type: + description: + - The type of service we are authenticating against + required: false + default: vca + choices: [ "vca", "vchs", "vcd" ] + state: + description: + - if the object should be added or removed + required: false + default: present + choices: [ "present", "absent" ] + verify_certs: + description: + - If the certificates of the authentication is to be verified + required: false + default: True + vdc_name: + description: + - The name of the vdc where the gateway is located. + required: false + default: None + gateway_name: + description: + - The name of the gateway of the vdc where the rule should be added + required: false + default: gateway +""" + diff --git a/lib/ansible/utils/module_docs_fragments/vmware.py b/lib/ansible/utils/module_docs_fragments/vmware.py new file mode 100644 index 00000000000..0b698865947 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/vmware.py @@ -0,0 +1,37 @@ +# (c) 2016, Charles Paul <cpaul@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ + +class ModuleDocFragment(object): + # Paramaters for VMware modules + DOCUMENTATION = ''' +options: + hostname: + description: + - The hostname or IP address of the vSphere vCenter + required: True + username: + description: + - The username of the vSphere vCenter + required: True + aliases: ['user', 'admin'] + password: + description: + - The password of the vSphere vCenter + required: True + aliases: ['pass', 'pwd'] +''' From 06fa841516fadf4454d8aa05609786a7b9f14b3b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 12:24:46 -0500 Subject: [PATCH 0350/1113] made loading of galaxy data files lazy --- lib/ansible/galaxy/__init__.py | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py index 62823fced47..e526b0aa873 100644 --- a/lib/ansible/galaxy/__init__.py +++ b/lib/ansible/galaxy/__init__.py @@ -49,11 +49,34 @@ class Galaxy(object): this_dir, this_filename = os.path.split(__file__) self.DATA_PATH = os.path.join(this_dir, "data") - #TODO: move to getter for lazy loading - self.default_readme = self._str_from_data_file('readme') - self.default_meta = self._str_from_data_file('metadata_template.j2') - self.default_test = self._str_from_data_file('test_playbook.j2') - self.default_travis = self._str_from_data_file('travis.j2') + self._default_readme = None + self._default_meta = None + self._default_test = None + self._default_travis = None + + @property + def default_readme(self): + if self._default_readme is None: + self._default_readme = self._str_from_data_file('readme') + return self._default_readme + + @property + def default_meta(self): + if self._default_meta is None: + self._default_meta = self._str_from_data_file('metadata_template.j2') + return self._default_meta + + @property + def default_test(self): + if self._default_test is None: + self._default_test = self._str_from_data_file('test_playbook.j2') + return self._default_test + + @property + def default_travis(self): + if self._default_travis is None: + self._default_travis = self._str_from_data_file('travis.j2') + return self._default_travis def add_role(self, role): self.roles[role.name] = role From 5760f0effbebe55e7150eb06e9a6b691a0e98455 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 13:17:02 -0500 Subject: [PATCH 0351/1113] dont error out on new internal vars --- lib/ansible/module_utils/basic.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index ede8240330e..fb300bc42c4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -514,9 +514,11 @@ class AnsibleModule(object): self.no_log = no_log self.cleanup_files = [] self._debug = False + self._diff = False + self._verbosity = 0 self.aliases = {} - self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] + self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity'] if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): @@ -1141,6 +1143,12 @@ class AnsibleModule(object): elif k == '_ansible_debug': self._debug = self.boolean(v) + elif k == '_ansible_diff': + self._diff = self.boolean(v) + + elif k == '_ansible_verbosity': + self._verbosity = v + elif check_invalid_arguments and k not in self._legal_inputs: self.fail_json(msg="unsupported parameter 
for module: %s" % k) From 06b0161e6d215d316aa5cb93b5b09a766ef4463d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 13:26:46 -0500 Subject: [PATCH 0352/1113] fixed extract version --- docsite/rst/playbooks_filters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index c91b04b3675..cae65d71bee 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -357,7 +357,7 @@ setting in `ansible.cfg`. Extracting values from containers --------------------------------- -.. versionadded:: 2.0 +.. versionadded:: 2.1 The `extract` filter is used to map from a list of indices to a list of values from a container (hash or array):: From 2374abbeda465c0aded55fabc18085a1530c9377 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 14:05:33 -0500 Subject: [PATCH 0353/1113] api not thread safe also removed runner ref in general description as it is now a 1.x only thing. --- docsite/rst/developing_api.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 99dea5d4afa..3fc529758f7 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -6,7 +6,7 @@ Python API There are several interesting ways to use Ansible from an API perspective. You can use the Ansible python API to control nodes, you can extend Ansible to respond to various python events, you can write various plugins, and you can plug in inventory data from external data sources. This document -covers the Runner and Playbook API at a basic level. +covers the execution and Playbook API at a basic level. If you are looking to use Ansible programmatically from something other than Python, trigger events asynchronously, or have access control and logging demands, take a look at :doc:`tower` @@ -17,8 +17,10 @@ This chapter discusses the Python API. .. _python_api: -The Python API is very powerful, and is how the ansible CLI and ansible-playbook -are implemented. In version 2.0 the core ansible got rewritten and the API was mostly rewritten. +The Python API is very powerful, and is how the all the ansible CLI tools are implemented. +In version 2.0 the core ansible got rewritten and the API was mostly rewritten. + +:.. note:: Ansible relies on forking processes, as such teh API is not thread safe. .. _python_api_20: From d9d4ad828a7d3f9f1440e4bec7150756165b754b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 12 Jan 2016 14:07:39 -0500 Subject: [PATCH 0354/1113] zpell the corerstly --- docsite/rst/developing_api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 3fc529758f7..96a447c05c1 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -20,7 +20,7 @@ This chapter discusses the Python API. The Python API is very powerful, and is how the all the ansible CLI tools are implemented. In version 2.0 the core ansible got rewritten and the API was mostly rewritten. -:.. note:: Ansible relies on forking processes, as such teh API is not thread safe. +:.. note:: Ansible relies on forking processes, as such the API is not thread safe. .. 
_python_api_20: From a3dcd99d9675ecf3ae3ae306b641a8228c2a2d58 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 12 Jan 2016 09:20:14 -0800 Subject: [PATCH 0355/1113] Documentation fixes --- docsite/rst/index.rst | 2 +- docsite/rst/intro_dynamic_inventory.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index fcf350dc3b0..4f77125bb9a 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -40,5 +40,5 @@ Ansible, Inc. releases a new major release of Ansible approximately every two mo faq glossary YAMLSyntax - porting_guide + porting_guide_2.0 diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 85feaa143bd..bbaf1a0fff4 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -247,7 +247,7 @@ After a few moments you should see some JSON output with information about your Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack.py` script as an inventory file, as illustrated below:: -ansible -i openstack.py all -m ping + ansible -i openstack.py all -m ping Implicit use of inventory script ++++++++++++++++++++++++++++++++ From 589971fe7ef78ea8bb41fb9ae6cd19cb8e277371 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 12 Jan 2016 12:00:09 -0800 Subject: [PATCH 0356/1113] Update submodule refs to try and fix the taiga_issue module-is-actually-old travis failures --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b77bf7a9aa1..5f6d0f73f42 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b77bf7a9aa1889fbee75f3db17c89816ca7c7838 +Subproject commit 5f6d0f73f4203f05c7a51d906e6c04108d397b39 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 4d2a20b2f43..39d6066f512 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 4d2a20b2f433db2492b6b3eb0554177fa42662e4 +Subproject commit 39d6066f512b3ceaa4f16592c452cb26c17675f6 From 21ac96fa35859facde9dbbd931d213327a0b644d Mon Sep 17 00:00:00 2001 From: jfrappier <jonathan.frappier@gmail.com> Date: Tue, 12 Jan 2016 21:51:47 -0500 Subject: [PATCH 0357/1113] Updated intro_getting_started.rst to reference --ask-become-pass --- docsite/rst/intro_getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst index d6a22a8bb49..7b783209def 100644 --- a/docsite/rst/intro_getting_started.rst +++ b/docsite/rst/intro_getting_started.rst @@ -33,7 +33,7 @@ In releases up to and including Ansible 1.2, the default was strictly paramiko. Occasionally you'll encounter a device that doesn't support SFTP. This is rare, but should it occur, you can switch to SCP mode in :doc:`intro_configuration`. -When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass``. +When speaking with remote machines, Ansible by default assumes you are using SSH keys. 
SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-become-pass`` (previously ``--ask-sudo-pass`` which has been depricated). While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet. From a196c7d737db14ba7dc238be901a5bf108e135c0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 13 Jan 2016 10:17:43 -0500 Subject: [PATCH 0358/1113] only send event if tqm exists fixes #13843 --- Makefile | 3 +++ lib/ansible/executor/playbook_executor.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f62cffb2df8..367987affce 100644 --- a/Makefile +++ b/Makefile @@ -167,6 +167,9 @@ install: sdist: clean docs $(PYTHON) setup.py sdist +sdist_upload: clean docs + $(PYTHON) setup.py sdist upload 2>&1 |tee upload.log + rpmcommon: $(MANPAGES) sdist @mkdir -p rpm-build @cp dist/*.gz rpm-build/ diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index eecaa66a62c..adaf92207b3 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -112,8 +112,8 @@ class PlaybookExecutor: salt = var.get("salt", None) if vname not in self._variable_manager.extra_vars: - self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) if self._tqm: + self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) else: # we are either in --list-<option> or syntax check play.vars[vname] = default From 739e4c0386b3b596d90bdcc95eae95ad98e4b810 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 13 Jan 2016 10:32:36 -0500 Subject: [PATCH 0359/1113] corrected info about windows module naming and loc --- docsite/rst/intro_windows.rst | 5 +---- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 1adcc35010f..375e5984a4c 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -256,10 +256,7 @@ Developers: Supported modules and how it works Developing Ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix. What if you want to write Windows modules for Ansible though? -For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. - -Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named -"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. +For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. 
Windows modules in the core and extras repo live in a "windows/" subdir. Custom modules can go directly into the Ansible "library/" directories or those added in ansible.cfg. Documentation lives in a a `.py` file with the same name. For example, if a module is named "win_ping", there will be embedded documentation in the "win_ping.py" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. Modules (ps1 files) should start as follows:: diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5f6d0f73f42..b77bf7a9aa1 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5f6d0f73f4203f05c7a51d906e6c04108d397b39 +Subproject commit b77bf7a9aa1889fbee75f3db17c89816ca7c7838 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 39d6066f512..4d2a20b2f43 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 39d6066f512b3ceaa4f16592c452cb26c17675f6 +Subproject commit 4d2a20b2f433db2492b6b3eb0554177fa42662e4 From 0b32e1586d7b8b3eda5e633dced76c1ba5e1d4c5 Mon Sep 17 00:00:00 2001 From: Alexey Shabalin <shaba@altlinux.org> Date: Wed, 13 Jan 2016 19:20:59 +0300 Subject: [PATCH 0360/1113] add detect Altlinux distributive --- lib/ansible/module_utils/facts.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 796ebc92bdd..a8c53eda3d9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -119,6 +119,7 @@ class Facts(object): ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), ('/etc/lsb-release', 'Mandriva'), + ('/etc/altlinux-release', 'Altlinux'), ('/etc/os-release', 'NA'), ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } @@ -270,7 +271,7 @@ class Facts(object): OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse', SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo', - Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', + Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris', SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin', FreeBSD = 'FreeBSD', HPUX = 'HP-UX' @@ -323,7 +324,7 @@ class Facts(object): for (path, name) in Facts.OSDIST_LIST: if os.path.exists(path): if os.path.getsize(path) > 0: - if self.facts['distribution'] in ('Fedora', ): + if self.facts['distribution'] in ('Fedora', 'Altlinux', ): # Once we determine the value is one of these distros # we trust the values are always correct break @@ -356,6 +357,13 @@ class Facts(object): else: self.facts['distribution'] = data.split()[0] break + elif name == 'Altlinux': + data = get_file_content(path) + if 'ALT Linux' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + break elif name == 'OtherLinux': data = get_file_content(path) if 'Amazon' in data: From 4958180333c668d0c7942b4a7c0c90e706e36bd0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 12:34:12 -0800 Subject: [PATCH 
0361/1113] use integer division instead of floating point division. Fixes #13855 --- lib/ansible/parsing/vault/__init__.py | 64 +++++++++++++-------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 1d4eeef4653..f38525e028d 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -220,69 +220,69 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) - + def _shred_file_custom(self, tmp_path): """"Destroy a file, when shred (core-utils) is not available - - Unix `shred' destroys files "so that they can be recovered only with great difficulty with - specialised hardware, if at all". It is based on the method from the paper - "Secure Deletion of Data from Magnetic and Solid-State Memory", + + Unix `shred' destroys files "so that they can be recovered only with great difficulty with + specialised hardware, if at all". It is based on the method from the paper + "Secure Deletion of Data from Magnetic and Solid-State Memory", Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996). - + We do not go to that length to re-implement shred in Python; instead, overwriting with a block - of random data should suffice. - + of random data should suffice. + See https://github.com/ansible/ansible/pull/13700 . """ - + file_len = os.path.getsize(tmp_path) max_chunk_len = min(1024*1024*2, file_len) - + passes = 3 with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) # get a random chunk of data, each pass with other length - chunk_len = random.randint(max_chunk_len/2, max_chunk_len) + chunk_len = random.randint(max_chunk_len//2, max_chunk_len) data = os.urandom(chunk_len) - + for _ in range(0, file_len // chunk_len): fh.write(data) fh.write(data[:file_len % chunk_len]) - + assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy os.fsync(fh) - - + + def _shred_file(self, tmp_path): """Securely destroy a decrypted file - Note standard limitations of GNU shred apply (For flash, overwriting would have no effect + Note standard limitations of GNU shred apply (For flash, overwriting would have no effect due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never - guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), - it is a non-issue. - - Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is - a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on + guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), + it is a non-issue. + + Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is + a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on a custom shredding method. """ - + if not os.path.isfile(tmp_path): # file is already gone - return - + return + try: r = call(['shred', tmp_path]) except OSError as e: - # shred is not available on this system, or some other error occured. + # shred is not available on this system, or some other error occured. r = 1 - + if r != 0: - # we could not successfully execute unix shred; therefore, do custom shred. + # we could not successfully execute unix shred; therefore, do custom shred. 
self._shred_file_custom(tmp_path) - + os.remove(tmp_path) - + def _edit_file_helper(self, filename, existing_data=None, force_save=False): # Create a tempfile @@ -294,11 +294,11 @@ class VaultEditor: # drop the user into an editor on the tmp file try: call(self._editor_shell_command(tmp_path)) - except: + except: # whatever happens, destroy the decrypted file self._shred_file(tmp_path) - raise - + raise + tmpdata = self.read_data(tmp_path) # Do nothing if the content has not changed From be9d817618bf4f0c9098f66b5afdc315476be2dd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 12:35:05 -0800 Subject: [PATCH 0362/1113] Make example of deprecated args clearer --- docsite/rst/porting_guide_2.0.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 543be052bdc..f22b44257db 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -56,7 +56,7 @@ uses key=value escaping which has not changed. The other option is to check for "msg": "Testing some things" * When specifying complex args as a variable, the variable must use the full jinja2 - variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. + variable syntax (```{{var_name}}```) - bare variable names there are no longer accepted. In fact, even specifying args with variables has been deprecated, and will not be allowed in future versions:: @@ -100,7 +100,14 @@ While all items listed here will show a deprecation warning message, they still debug_params: msg: "hello there" tasks: + # These are both deprecated: - debug: "{{debug_params}}" + - debug: + args: "{{debug_params}}" + + # Use this instead: + - debug: + msg: "{{debug_params['msg']}}" * Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. * Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. 
From 8068f23bad4d0fb8891e5dc1855f8b39e8935e88 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 13:08:33 -0800 Subject: [PATCH 0363/1113] do_encrypt import needed to move as well Fixes #13861 --- lib/ansible/executor/playbook_executor.py | 1 - lib/ansible/utils/display.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index adaf92207b3..bdb08993e3f 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -31,7 +31,6 @@ from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook from ansible.template import Templar -from ansible.utils.encrypt import do_encrypt from ansible.utils.unicode import to_unicode try: diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 0447585fa31..385bc51e060 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -35,6 +35,7 @@ from multiprocessing import Lock from ansible import constants as C from ansible.errors import AnsibleError from ansible.utils.color import stringc +from ansible.utils.encrypt import do_encrypt from ansible.utils.unicode import to_bytes, to_unicode try: From f72d380bcf3c12bc6790d86135633e72430fbc86 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 13:15:08 -0800 Subject: [PATCH 0364/1113] Specify all variables in the description to make it more clear --- docsite/rst/porting_guide_2.0.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index f22b44257db..a26763fc14a 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -92,7 +92,7 @@ While all items listed here will show a deprecation warning message, they still * Bare variables in `with_` loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. * The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. * Undefined variables within a `with_` loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. -* Using variables for task parameters is unsafe and will be removed in a future version. For example:: +* Using dictionary variables to set all task parameters is unsafe and will be removed in a future version. 
For example:: - hosts: localhost gather_facts: no From 58f387a6dd7f53770c50c788ff083184bd9ea682 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 14:04:56 -0800 Subject: [PATCH 0365/1113] Fix circular import --- lib/ansible/utils/display.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 385bc51e060..57cee14ffe3 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -35,7 +35,6 @@ from multiprocessing import Lock from ansible import constants as C from ansible.errors import AnsibleError from ansible.utils.color import stringc -from ansible.utils.encrypt import do_encrypt from ansible.utils.unicode import to_bytes, to_unicode try: @@ -313,6 +312,8 @@ class Display: result = default if encrypt: + # Circular import because encrypt needs a display class + from ansible.utils.encrypt import do_encrypt result = do_encrypt(result, encrypt, salt_size, salt) # handle utf-8 chars From a56c0bc27b07356c3639c0ac39805d4e5dd91171 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 13 Jan 2016 11:00:17 -0500 Subject: [PATCH 0366/1113] module invocation info depends on verbosity since we were removing it on display, this saves us even transmiting it back --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index fb300bc42c4..b420f18e6e8 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -579,7 +579,7 @@ class AnsibleModule(object): self._set_defaults(pre=False) - if not self.no_log: + if not self.no_log and self._verbosity >= 3: self._log_invocation() # finally, make sure we're in a sane working dir From 4de4d59d7a91aafa8127e6d48b303ea0abcc204b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 13 Jan 2016 18:06:19 -0500 Subject: [PATCH 0367/1113] changed examples to not use 'port' directive --- docsite/rst/playbooks_roles.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 2e1173acda9..76bff6666c0 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -213,8 +213,8 @@ Also, should you wish to parameterize roles, by adding variables, you can do so, - hosts: webservers roles: - common - - { role: foo_app_instance, dir: '/opt/a', port: 5000 } - - { role: foo_app_instance, dir: '/opt/b', port: 5001 } + - { role: foo_app_instance, dir: '/opt/a', app_port: 5000 } + - { role: foo_app_instance, dir: '/opt/b', app_port: 5001 } While it's probably not something you should do often, you can also conditionally apply roles like so:: @@ -284,7 +284,7 @@ a list of roles and parameters to insert before the specified role, such as the --- dependencies: - { role: common, some_parameter: 3 } - - { role: apache, port: 80 } + - { role: apache, appache_port: 80 } - { role: postgres, dbname: blarg, other_parameter: 12 } Role dependencies can also be specified as a full path, just like top level roles:: From 6c0b4bc56e507b8ead48e5b3a3c8ffbc55a72f74 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 17:22:43 -0800 Subject: [PATCH 0368/1113] Add python-setuptools to the requirements for running ansible as python-setuptools contains the egginfo needed to make pkg_resources work. 
--- packaging/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/control b/packaging/debian/control index 462fd5caf17..13f0c5b42de 100644 --- a/packaging/debian/control +++ b/packaging/debian/control @@ -8,7 +8,7 @@ Homepage: http://ansible.github.com/ Package: ansible Architecture: all -Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), sshpass, ${misc:Depends} +Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), python-setuptools, sshpass, ${misc:Depends} Description: A radically simple IT automation platform A radically simple IT automation platform that makes your applications and systems easier to deploy. Avoid writing scripts or custom code to deploy and From 965602882aadb985c4726a9024dd601ec238ef79 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 20:07:04 -0800 Subject: [PATCH 0369/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b77bf7a9aa1..fd59dccdd74 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b77bf7a9aa1889fbee75f3db17c89816ca7c7838 +Subproject commit fd59dccdd7496733ddb8388caf6a701775c6e527 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 4d2a20b2f43..38dfe233360 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 4d2a20b2f433db2492b6b3eb0554177fa42662e4 +Subproject commit 38dfe23336086a6a00d6abce42d7790633aefb9f From b1a56051bda80f75313833c2436ce6bfc333eb7a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 13 Jan 2016 20:50:19 -0800 Subject: [PATCH 0370/1113] Prevent traceback. https://github.com/ansible/ansible/issues/13743#issuecomment-171520585 In some circumstance, the file fails to open. When that occurs, we can't try to close it in the finally clause. Using a context manager is the cleanest way to change the code to account for that case. --- lib/ansible/galaxy/role.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index 36b1e0fbbba..700664c4cd0 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -130,13 +130,11 @@ class GalaxyRole(object): install_date=datetime.datetime.utcnow().strftime("%c"), ) info_path = os.path.join(self.path, self.META_INSTALL) - try: - f = open(info_path, 'w+') - self._install_info = yaml.safe_dump(info, f) - except: - return False - finally: - f.close() + with open(info_path, 'w+') as f: + try: + self._install_info = yaml.safe_dump(info, f) + except: + return False return True From 9be8ecda0668631a69883ec69ae6eeab1abef848 Mon Sep 17 00:00:00 2001 From: Alexey Shabalin <shaba@altlinux.org> Date: Thu, 14 Jan 2016 13:01:49 +0300 Subject: [PATCH 0371/1113] Add support ssh configs from /etc/openssh. In Altlinux system config dir for openssh is /etc/openssh. 
--- lib/ansible/module_utils/facts.py | 2 ++ lib/ansible/module_utils/known_hosts.py | 1 + lib/ansible/plugins/connection/paramiko_ssh.py | 12 +++++++----- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index a8c53eda3d9..3ce5b14820a 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -536,6 +536,8 @@ class Facts(object): keydir = '/etc/ssh' else: keydir = '/etc' + if self.facts['distribution'] == 'Altlinux': + keydir = '/etc/openssh' else: keydir = '/etc/ssh' diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 52b0bb74b0f..09d08b20ef3 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -119,6 +119,7 @@ def not_in_host_file(self, host): host_file_list.append(user_host_file) host_file_list.append("/etc/ssh/ssh_known_hosts") host_file_list.append("/etc/ssh/ssh_known_hosts2") + host_file_list.append("/etc/openssh/ssh_known_hosts") hfiles_not_found = 0 for hf in host_file_list: diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index ab9ce90db95..52bbdf05d8b 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -151,11 +151,13 @@ class Connection(ConnectionBase): self.keyfile = os.path.expanduser("~/.ssh/known_hosts") if C.HOST_KEY_CHECKING: - try: - #TODO: check if we need to look at several possible locations, possible for loop - ssh.load_system_host_keys("/etc/ssh/ssh_known_hosts") - except IOError: - pass # file was not found, but not required to function + for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"): + try: + #TODO: check if we need to look at several possible locations, possible for loop + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function ssh.load_system_host_keys() ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self)) From 9d1b2806898f09d8cf5e34a061532a385d433897 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 10:23:35 -0500 Subject: [PATCH 0372/1113] now combine vars errors dump vars very hard to debug w/o knowing what vars were being merged at the time of the error --- lib/ansible/utils/vars.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py index a6e42cefa14..569bad2847a 100644 --- a/lib/ansible/utils/vars.py +++ b/lib/ansible/utils/vars.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import ast -from json import JSONEncoder +from json import dumps from collections import MutableMapping from ansible.compat.six import iteritems, string_types @@ -43,9 +43,9 @@ def _validate_mutable_mappings(a, b): # a variable number of arguments instead. 
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)): - raise AnsibleError("failed to combine variables, expected dicts but" - " got a '{0}' and a '{1}'".format( - a.__class__.__name__, b.__class__.__name__)) + raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format( + a.__class__.__name__, b.__class__.__name__, dumps(a), dumps(b)) + ) def combine_vars(a, b): """ From c14eece0c6cd77e9fcdb6206b05de021dac0847a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 10:24:34 -0500 Subject: [PATCH 0373/1113] md5 now uses smaller salt fixes #13891 --- lib/ansible/plugins/filter/core.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index dc9acb4d092..fed5097d919 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -225,7 +225,11 @@ def get_encrypted_password(password, hashtype='sha512', salt=None): if hashtype in cryptmethod: if salt is None: r = SystemRandom() - salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)]) + if hashtype in ['md5']: + saltsize = 8 + else: + saltsize = 16 + salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)]) if not HAS_PASSLIB: if sys.platform.startswith('darwin'): From 0b86aa62e1d70e7ed9162df52512edee6e55aa12 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 14 Jan 2016 11:54:22 -0500 Subject: [PATCH 0374/1113] Hack to work around callback API change for v2_playbook_on_start --- lib/ansible/executor/task_queue_manager.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index ab46d6f78b8..ed9de6603be 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -288,7 +288,20 @@ class TaskQueueManager: for method in methods: if method is not None: try: - method(*args, **kwargs) + # temporary hack, required due to a change in the callback API, so + # we don't break backwards compatibility with callbacks which were + # designed to use the original API + # FIXME: target for removal and revert to the original code here + # after a year (2017-01-14) + if method_name == 'v2_playbook_on_start': + import inspect + (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method) + if 'playbook' in args: + method(*args, **kwargs) + else: + method() + else: + method(*args, **kwargs) except Exception as e: import traceback orig_tb = traceback.format_exc() From abc82fee13d5c2a08ce6cb990bdad01487420805 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 14 Jan 2016 11:57:12 -0500 Subject: [PATCH 0375/1113] Fix typo in 0b86aa6 --- lib/ansible/executor/task_queue_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index ed9de6603be..13840de5964 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -296,7 +296,7 @@ class TaskQueueManager: if method_name == 'v2_playbook_on_start': import inspect (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method) - if 'playbook' in args: + if 'playbook' in f_args: method(*args, **kwargs) else: method() From 
94fa9c2a7a9741eb1604d71eb063050340f610b5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 12:22:19 -0500 Subject: [PATCH 0376/1113] test aliases to enable gramatical consistancy keeps backwards compat by not removing the previouslly non grammer matching states and introduces new ones so user can decide which one he wants (or keep both and still be inconsistent to annoy those that care) --- lib/ansible/plugins/test/core.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py index 1bd789260f7..fb9e0fb86e7 100644 --- a/lib/ansible/plugins/test/core.py +++ b/lib/ansible/plugins/test/core.py @@ -89,14 +89,18 @@ class TestModule(object): def tests(self): return { # failure testing - 'failed' : failed, - 'success' : success, + 'failed' : failed, + 'failure' : failed, + 'success' : success, + 'succeeded' : success, # changed testing 'changed' : changed, + 'change' : changed, # skip testing 'skipped' : skipped, + 'skip' : skipped, # regex 'match': match, From ec95f50cb6ea48810b737993e4db3672846b31ab Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 12:28:34 -0500 Subject: [PATCH 0377/1113] updated docs with new test grammer examples --- docsite/rst/playbooks_conditionals.rst | 2 +- docsite/rst/playbooks_filters.rst | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst index 15d397c7ac1..47cc844f457 100644 --- a/docsite/rst/playbooks_conditionals.rst +++ b/docsite/rst/playbooks_conditionals.rst @@ -47,7 +47,7 @@ decide to do something conditionally based on success or failure:: - command: /bin/something when: result|failed - command: /bin/something_else - when: result|success + when: result|succeeded - command: /bin/still/something_else when: result|skipped diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index cae65d71bee..05ff830e3bf 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -58,12 +58,17 @@ The following tasks are illustrative of how filters can be used with conditional - debug: msg="it changed" when: result|changed + - debug: msg="it succeeded in Ansible >= 2.1" + when: result|succeeded + - debug: msg="it succeeded" when: result|success - debug: msg="it was skipped" when: result|skipped +.. note:: From 2.1 You can also use success, failure, change, skip so the grammer matches, for those that want to be strict about it. + .. _forcing_variables_to_be_defined: Forcing Variables To Be Defined From e5ea57646186dd4ec834e0c824c3942a60278db0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Thu, 14 Jan 2016 22:32:05 +0100 Subject: [PATCH 0378/1113] Allow Ansible to return error with unicode within it. Fix for https://github.com/ansible/ansible/issues/13899 --- lib/ansible/errors/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index 017272af7ca..2185a83e4c0 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -54,7 +54,7 @@ class AnsibleError(Exception): if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error: - self.message = 'ERROR! %s\n\n%s' % (message, to_str(extended_error)) + self.message = 'ERROR! 
%s\n\n%s' % (to_str(message), to_str(extended_error)) else: self.message = 'ERROR! %s' % message From c2d314dec5bd949b2d9346325c66068bc4df2201 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 17:55:44 -0500 Subject: [PATCH 0379/1113] fix issue with add_hosts overwriting existing vars fixes github.com/ansible/ansible-modules-core/issues/2799 and is alternative to #13841 --- lib/ansible/plugins/strategy/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 7b2a3794efc..7e726f0d34a 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -40,6 +40,7 @@ from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar from ansible.vars.unsafe_proxy import wrap_var +from ansible.vars import combine_vars try: from __main__ import display @@ -372,9 +373,8 @@ class StrategyBase: allgroup.add_host(new_host) # Set/update the vars for this host - new_vars = host_info.get('host_vars', dict()) - new_host.vars = self._inventory.get_host_vars(new_host) - new_host.vars.update(new_vars) + new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host)) + new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict())) new_groups = host_info.get('groups', []) for group_name in new_groups: From a311872c5b1631ad7c2fcc874696008a4a70c0b7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 14 Jan 2016 18:06:54 -0500 Subject: [PATCH 0380/1113] overwrite/merge not remove prev inventory data --- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 885005960f5..5885d28014c 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -736,11 +736,11 @@ class Inventory(object): if group and host is None: # load vars in dir/group_vars/name_of_group base_path = os.path.realpath(os.path.join(basedir, "group_vars/%s" % group.name)) - results = self._variable_manager.add_group_vars_file(base_path, self._loader) + results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader)) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host base_path = os.path.realpath(os.path.join(basedir, "host_vars/%s" % host.name)) - results = self._variable_manager.add_host_vars_file(base_path, self._loader) + results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader)) # all done, results is a dictionary of variables for this particular host. return results From 1b82de2354b92d2683c372a4fbd4b905fab2f19d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 14 Jan 2016 17:21:15 -0800 Subject: [PATCH 0381/1113] Non-newstyle modules can't use pipelining This is because we pass arguments to non-newstyle modules via an external file. If we pipeline, then the interpreter thinks it has to run the arguments as the script instead of what is piped in via stdin. 
--- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 0f5ba91862e..0b33d576c0d 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -430,7 +430,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): cmd = "" in_data = None - if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES: + if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new': in_data = module_data else: if remote_module_path: From 1825b4a9c74417764f0d6f0c4a4a515a55a07735 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 15 Jan 2016 13:14:27 -0500 Subject: [PATCH 0382/1113] Fix any_errors_fatal incorrect implementation in 2.0 Also adds that flag to blocks. Fixes #13744 --- lib/ansible/executor/playbook_executor.py | 4 +--- lib/ansible/playbook/block.py | 11 +++++++++++ lib/ansible/playbook/task.py | 19 +++++++++++-------- lib/ansible/plugins/strategy/free.py | 2 ++ lib/ansible/plugins/strategy/linear.py | 12 ++++++++++++ 5 files changed, 37 insertions(+), 11 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index bdb08993e3f..bcfe1bebbe9 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -151,9 +151,7 @@ class PlaybookExecutor: # conditions are met, we break out, otherwise we only break out if the entire # batch failed failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - if new_play.any_errors_fatal and failed_hosts_count > 0: - break - elif new_play.max_fail_percentage is not None and \ + if new_play.max_fail_percentage is not None and \ int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0): break elif len(batch) == failed_hosts_count: diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index f2d9c82833a..be73c5d8acd 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -35,6 +35,7 @@ class Block(Base, Become, Conditional, Taggable): _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', default=False) + _any_errors_fatal = FieldAttribute(isa='bool') # for future consideration? this would be functionally # similar to the 'else' clause for exceptions @@ -330,6 +331,16 @@ class Block(Base, Become, Conditional, Taggable): return environment + def _get_attr_any_errors_fatal(self): + ''' + Override for the 'tags' getattr fetcher, used from Base. 
+ ''' + any_errors_fatal = self._attributes['any_errors_fatal'] + if hasattr(self, '_get_parent_attribute'): + if self._get_parent_attribute('any_errors_fatal'): + any_errors_fatal = True + return any_errors_fatal + def filter_tagged_tasks(self, play_context, all_vars): ''' Creates a new block, with task lists filtered based on the tags contained diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 62b8cbc999b..154ff53d5e3 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -216,14 +216,6 @@ class Task(Base, Conditional, Taggable, Become): return super(Task, self).preprocess_data(new_ds) - def _load_any_errors_fatal(self, attr, value): - ''' - Exists only to show a deprecation warning, as this attribute is not valid - at the task level. - ''' - display.deprecated("Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only") - return None - def post_validate(self, templar): ''' Override of base class post_validate, to also do final validation on @@ -422,3 +414,14 @@ class Task(Base, Conditional, Taggable, Become): if parent_environment is not None: environment = self._extend_value(environment, parent_environment) return environment + + def _get_attr_any_errors_fatal(self): + ''' + Override for the 'tags' getattr fetcher, used from Base. + ''' + any_errors_fatal = self._attributes['any_errors_fatal'] + if hasattr(self, '_get_parent_attribute'): + if self._get_parent_attribute('any_errors_fatal'): + any_errors_fatal = True + return any_errors_fatal + diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 976d33abba0..da123ce3b73 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -122,6 +122,8 @@ class StrategyModule(StrategyBase): else: # handle step if needed, skip meta actions as they are used internally if not self._step or self._take_step(task, host_name): + if task.any_errors_fatal: + display.warning("Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host") self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) self._queue_task(host, task, task_vars, play_context) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index f441b88fe3d..40c435ca539 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -280,6 +280,7 @@ class StrategyModule(StrategyBase): except AnsibleError as e: return False + include_failure = False if len(included_files) > 0: display.debug("we have included files to process") noop_task = Task() @@ -325,6 +326,7 @@ class StrategyModule(StrategyBase): self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) display.error(e, wrap_text=False) + include_failure = True continue # finally go through all of the hosts and append the @@ -338,6 +340,16 @@ class StrategyModule(StrategyBase): display.debug("done processing included files") display.debug("results queue empty") + + display.debug("checking for any_errors_fatal") + had_failure = include_failure + for res in results: + if res.is_failed() or res.is_unreachable(): + had_failure = True + break + if task and task.any_errors_fatal and had_failure: + return False + except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed From 4e95c9f1eb09998d4f1d57119dc5f69b4e3a90be Mon Sep 17 00:00:00 2001 
From: Denis <dkasak@users.noreply.github.com> Date: Fri, 15 Jan 2016 21:01:05 +0100 Subject: [PATCH 0383/1113] Fix typos. --- docs/man/man1/ansible-playbook.1.asciidoc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 5686162f212..82181982fb7 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -96,7 +96,7 @@ Show help page and exit *-i* 'PATH', *--inventory=*'PATH':: The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. -Alternatively you can use a comma separated list of hosts or single host with traling comma 'host,'. +Alternatively, you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'. *-l* 'SUBSET', *--limit=*'SUBSET':: From 4e5dc44289a2b1d58b633d4096df7039e98b62ca Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 15 Jan 2016 15:07:45 -0500 Subject: [PATCH 0384/1113] fixes documentation string --- lib/ansible/module_utils/nxos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index f75ac5123fc..41e4269ade3 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -191,7 +191,7 @@ class NxosModule(AnsibleModule): return resp['ins_api']['outputs']['output']['body'] def get_module(**kwargs): - """Return instance of EosModule + """Return instance of NxosModule """ argument_spec = NET_COMMON_ARGS.copy() From 8e059d058b20a6757e946e4d8718c6c7918a5bd4 Mon Sep 17 00:00:00 2001 From: J Levitt <jlevitt@gmail.com> Date: Fri, 15 Jan 2016 14:17:23 -0600 Subject: [PATCH 0385/1113] Added example of running commands on a remote Windows Server Added example of running commands on a remote Windows Server --- docsite/rst/intro_windows.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 375e5984a4c..b9b195643be 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -319,6 +319,14 @@ Running individual commands uses the 'raw' module, as opposed to the shell or co register: ipconfig - debug: var=ipconfig +Running common DOS commands like 'del", 'move', or 'copy" is unlikely to work on a remote Windows Server using Powershell, but they can work by prefacing the commands with "CMD /C" and enclosing the command in double quotes as in this example:: + + - name: another raw module example + hosts: windows + tasks: + - name: Move file on remote Windows Server from one location to another + raw: CMD /C "MOVE /Y C:\teststuff\myfile.conf C:\builds\smtp.conf" + And for a final example, here's how to use the win_stat module to test for file existence. 
Note that the data returned by the win_stat module is slightly different than what is provided by the Linux equivalent:: - name: test stat module From e6ab0daa4ba5cf9b6e2cbe88f5f4a176c33707eb Mon Sep 17 00:00:00 2001 From: Ryan Groten <rgroten@gmail.com> Date: Fri, 15 Jan 2016 14:11:17 -0700 Subject: [PATCH 0386/1113] minor wording fixes --- docsite/rst/intro_adhoc.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 61ba33523a6..e9abdccc95b 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -88,7 +88,7 @@ The ``-f 10`` in the above specifies the usage of 10 simultaneous processes to use. You can also set this in :doc:`intro_configuration` to avoid setting it again. The default is actually 5, which is really small and conservative. You are probably going to want to talk to a lot more simultaneous hosts so feel free to crank this up. If you have more hosts than the value set for the fork count, Ansible will talk to them, but it will -take a little longer. Feel free to push this value as high as your system can handle it! +take a little longer. Feel free to push this value as high as your system can handle! You can also select what Ansible "module" you want to run. Normally commands also take a ``-m`` for module name, but the default module name is 'command', so we didn't need to @@ -170,7 +170,7 @@ Ensure a package is not installed:: Ansible has modules for managing packages under many platforms. If your package manager does not have a module available for it, you can install -for other packages using the command module or (better!) contribute a module +packages using the command module or (better!) contribute a module for other package managers. Stop by the mailing list for info/details. .. _users_and_groups: @@ -249,7 +249,7 @@ very quickly. After the time limit (in seconds) runs out (``-B``), the process o the remote nodes will be terminated. Typically you'll only be backgrounding long-running -shell commands or software upgrades only. Backgrounding the copy module does not do a background file transfer. :doc:`Playbooks <playbooks>` also support polling, and have a simplified syntax for this. +shell commands or software upgrades. Backgrounding the copy module does not do a background file transfer. :doc:`Playbooks <playbooks>` also support polling, and have a simplified syntax for this. .. 
_checking_facts: From 36aa89ac7e77dde7fba7a14db21845a456577b46 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 15 Jan 2016 16:27:15 -0800 Subject: [PATCH 0387/1113] Fix erroneous fetch fail when fail_on_missing is set to False Fixes #13832 --- lib/ansible/plugins/action/fetch.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index 7c7f53de63d..0dacd021457 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -70,7 +70,7 @@ class ActionModule(ActionBase): if remote_checksum in ('1', '2', None): slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp) if slurpres.get('failed'): - if remote_checksum == '1' and not fail_on_missing: + if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'): result['msg'] = "the remote file does not exist, not transferring, ignored" result['file'] = source result['changed'] = False @@ -171,7 +171,9 @@ class ActionModule(ActionBase): new_md5 = None if validate_checksum and new_checksum != remote_checksum: - result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) + result.update(dict(failed=True, md5sum=new_md5, + msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, + checksum=new_checksum, remote_checksum=remote_checksum)) else: result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)) else: From 2cde16a06cf0919c53d12a9fb40c4b114e8c5e83 Mon Sep 17 00:00:00 2001 From: Dan Langille <dan.langille+git@gmail.com> Date: Sat, 16 Jan 2016 14:37:04 -0500 Subject: [PATCH 0388/1113] Add correct default options for sudo_flags see https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py#L181 --- docsite/rst/intro_configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ccfb456ed93..d36bb6dbd60 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -591,7 +591,7 @@ Additional flags to pass to sudo when engaging sudo support. The default is '-H of the original user. In some situations you may wish to add or remove flags, but in general most users will not need to change this setting:: - sudo_flags=-H + sudo_flags=-H -S -n .. _sudo_user: From 27657084509edfe2ceebfdfc1dbdef48772ef4a7 Mon Sep 17 00:00:00 2001 From: Dan Langille <dan.langille+git@gmail.com> Date: Sat, 16 Jan 2016 15:28:41 -0500 Subject: [PATCH 0389/1113] Specify the correct default values for sudo_flags The correct default options for sudo_flags can be found at: https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py#L181 Slightly alter explanation of '-H' so as not to confuse it with -E, --preserve-env (which preserves existing environment variables). When adding the two other options, include short explanations of those options. Add note about '-n', which did not appear in 1.x I believe, and which bit me. 
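For illustration, a site that relies on password-less sudo (for example via pam_ssh_agent_auth) might want to drop '-n' from the new default again; a sketch of a possible ansible.cfg override, assuming the usual [defaults] section, not something this patch itself adds:

    [defaults]
    # keep -H (set $HOME) and -S (read the password from stdin),
    # but drop -n so sudo is still allowed to prompt
    sudo_flags = -H -S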
--- docsite/rst/intro_configuration.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index d36bb6dbd60..51a1ad1e588 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -587,9 +587,10 @@ the sudo implementation is matching CLI flags with the standard sudo:: sudo_flags ========== -Additional flags to pass to sudo when engaging sudo support. The default is '-H' which preserves the $HOME environment variable -of the original user. In some situations you may wish to add or remove flags, but in general most users -will not need to change this setting:: +Additional flags to pass to sudo when engaging sudo support. The default is '-H -S -n' which sets the HOME environment +variable, prompts for passwords via STDIN, and avoids prompting the user for input of any kind. Note that '-n' will conflict +with using password-less sudo auth, such as pam_ssh_agent_auth. In some situations you may wish to add or remove flags, but +in general most users will not need to change this setting::: sudo_flags=-H -S -n From c42484a0297a9372a6864e06849cb540c5b272ca Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 18 Jan 2016 13:36:40 -0500 Subject: [PATCH 0390/1113] Minor cleanup when reassigning play context to reused connections * Relocate the assignment of the host address to the remote_addr field in the play context, which was only done when the connection was created (it's now done after the post_validate() is called on the play context) * Make the assignment of the play context to the connection an else, since it's not required if the connection is not reused --- lib/ansible/executor/task_executor.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 9b7ac8b1563..5f9cccae4c2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -316,6 +316,11 @@ class TaskExecutor: # do the same kind of post validation step on it here before we use it. self._play_context.post_validate(templar=templar) + # now that the play context is finalized, if the remote_addr is not set + # default to using the host's address field as the remote address + if not self._play_context.remote_addr: + self._play_context.remote_addr = self._host.address + # We also add "magic" variables back into the variables dict to make sure # a certain subset of variables exist. 
self._play_context.update_vars(variables) @@ -365,9 +370,10 @@ class TaskExecutor: if not self._connection or not getattr(self._connection, 'connected', False): self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides(host=self._host) - #If connection is reused, its _play_context is no longer valid and needs to be replaced - #This fixes issues with tasks running sudo in a loop and having the success_key incorrect in the second iteration - self._connection._play_context = self._play_context + else: + # if connection is reused, its _play_context is no longer valid and needs + # to be replaced with the one templated above, in case other data changed + self._connection._play_context = self._play_context self._handler = self._get_action_handler(connection=self._connection, templar=templar) @@ -547,9 +553,6 @@ class TaskExecutor: correct connection object from the list of connection plugins ''' - if not self._play_context.remote_addr: - self._play_context.remote_addr = self._host.address - if self._task.delegate_to is not None: # since we're delegating, we don't want to use interpreter values # which would have been set for the original target host From 46e515131e8a25943efe65ffe32c45f4e1246a9b Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 18 Jan 2016 14:32:44 -0500 Subject: [PATCH 0391/1113] Allow module args as k=v pairs when using the module: option with local_action This task format is valid in 1.x, but was broken in 2.x: - local_action: module: shell echo "hello world" --- lib/ansible/parsing/mod_args.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index 86b2d0d996d..fbf5e1c3d61 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -222,18 +222,21 @@ class ModuleArgsParser: action = None args = None + actions_allowing_raw = ('command', 'shell', 'script', 'raw') if isinstance(thing, dict): # form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args thing = thing.copy() if 'module' in thing: - action = thing['module'] + action, module_args = self._split_module_string(thing['module']) args = thing.copy() + check_raw = action in actions_allowing_raw + args.update(parse_kv(module_args, check_raw=check_raw)) del args['module'] elif isinstance(thing, string_types): # form is like: copy: src=a dest=b ... 
common shorthand throughout ansible (action, args) = self._split_module_string(thing) - check_raw = action in ('command', 'shell', 'script', 'raw') + check_raw = action in actions_allowing_raw args = parse_kv(args, check_raw=check_raw) else: From b5058736cee5df234732f8f4854cd0e5f676dd60 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 18 Jan 2016 14:50:20 -0500 Subject: [PATCH 0392/1113] Fix set_fact + run_once to assign variables to all hosts in the list Fixes #13921 --- lib/ansible/plugins/strategy/__init__.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 7e726f0d34a..00744dbf730 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -309,23 +309,23 @@ class StrategyBase: else: actual_host = host + if task.run_once: + host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] + else: + host_list = [actual_host] + if result[0] == 'set_host_var': var_name = result[4] var_value = result[5] - - if task.run_once: - host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] - else: - host_list = [actual_host] - for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] - if task.action == 'set_fact': - self._variable_manager.set_nonpersistent_facts(actual_host, facts) - else: - self._variable_manager.set_host_facts(actual_host, facts) + for target_host in host_list: + if task.action == 'set_fact': + self._variable_manager.set_nonpersistent_facts(target_host, facts) + else: + self._variable_manager.set_host_facts(target_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) From 83069a38d5c2acd4537f940bd7fbd3c37766d752 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 15 Jan 2016 10:25:56 -0500 Subject: [PATCH 0393/1113] better init detection --- lib/ansible/module_utils/facts.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index a8c53eda3d9..a214b6bb30d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -555,16 +555,16 @@ class Facts(object): self.facts['pkg_mgr'] = 'openbsd_pkg' def get_service_mgr_facts(self): - #TODO: detect more custom init setups like bootscripts, dmd, s6, etc + #TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc # also other OSs other than linux might need to check across several possible candidates # try various forms of querying pid 1 - proc_1 = get_file_content('/proc/1/comm') + proc_1 = os.path.basename(get_file_content('/proc/1/comm')) if proc_1 is None: rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) - if proc_1 in ['init', '/sbin/init', 'bash']: - # many systems return init, so this cannot be trusted, bash is from docker + if proc_1 == 'init' or proc_1.endswith('sh'): + # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! 
@@ -578,7 +578,7 @@ class Facts(object): self.facts['service_mgr'] = 'launchd' else: self.facts['service_mgr'] = 'systemstarter' - elif self.facts['system'].endswith('BSD') or self.facts['system'] in ['Bitrig', 'DragonFly']: + elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']: #FIXME: we might want to break out to individual BSDs self.facts['service_mgr'] = 'bsdinit' elif self.facts['system'] == 'AIX': @@ -587,12 +587,11 @@ class Facts(object): #FIXME: smf? self.facts['service_mgr'] = 'svcs' elif self.facts['system'] == 'Linux': - if self._check_systemd(): self.facts['service_mgr'] = 'systemd' elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"): self.facts['service_mgr'] = 'upstart' - elif module.get_bin_path('rc-service'): + elif os.path.realpath('/sbin/rc') == '/sbin/openrc': self.facts['service_mgr'] = 'openrc' elif os.path.exists('/etc/init.d/'): self.facts['service_mgr'] = 'sysvinit' From 27f4730c290bb331c2fcd0c0a953f43c1e781b60 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 18 Jan 2016 15:08:07 -0500 Subject: [PATCH 0394/1113] correctly deals with non serializable type combine_vars shoudl really be data types, but some just get in in test, add dict to mock and avoid combine_vars using object --- lib/ansible/utils/vars.py | 11 +++++++++-- test/units/plugins/strategies/test_strategy_base.py | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py index 569bad2847a..4d44a068c20 100644 --- a/lib/ansible/utils/vars.py +++ b/lib/ansible/utils/vars.py @@ -28,7 +28,8 @@ from ansible.compat.six import iteritems, string_types from ansible import constants as C from ansible.errors import AnsibleError from ansible.parsing.splitter import parse_kv -from ansible.utils.unicode import to_unicode +from ansible.utils.unicode import to_unicode, to_str + def _validate_mutable_mappings(a, b): """ @@ -43,8 +44,14 @@ def _validate_mutable_mappings(a, b): # a variable number of arguments instead. 
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)): + myvars = [] + for x in [a, b]: + try: + myvars.append(dumps(x)) + except: + myvars.append(to_str(x)) raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format( - a.__class__.__name__, b.__class__.__name__, dumps(a), dumps(b)) + a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1]) ) def combine_vars(a, b): diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 8d1a1e8adab..9ea944a2a17 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -196,6 +196,7 @@ class TestStrategyBase(unittest.TestCase): mock_inventory.get_host.side_effect = _get_host mock_inventory.get_group.side_effect = _get_group mock_inventory.clear_pattern_cache.return_value = None + mock_inventory.get_host_vars.return_value = {} mock_var_mgr = MagicMock() mock_var_mgr.set_host_variable.return_value = None From ded02b4968c62d275f9c7315579ab132829ea988 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 18 Jan 2016 13:48:37 -0800 Subject: [PATCH 0395/1113] Fix proposed by @Yannig to fix become success detection when the output is multiline See the Bug report for a specific error case with local connection, sudo, and the raw module Fixes #13728 --- lib/ansible/plugins/connection/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index ff00bc02380..bea8e5b426b 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -206,7 +206,10 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): pass def check_become_success(self, output): - return self._play_context.success_key == output.rstrip() + for line in output.splitlines(True): + if self._play_context.success_key == line.rstrip(): + return True + return False def check_password_prompt(self, output): if self._play_context.prompt is None: From 2c512e5a633431e56479ad1b8b1ed39062435695 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 18 Jan 2016 13:59:57 -0800 Subject: [PATCH 0396/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fd59dccdd74..51c3aa42673 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fd59dccdd7496733ddb8388caf6a701775c6e527 +Subproject commit 51c3aa42673c3661e559e29bcad470f5e1556f2f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 38dfe233360..f798240f436 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 38dfe23336086a6a00d6abce42d7790633aefb9f +Subproject commit f798240f436a16a828f48759bbd176b6bccdfe75 From 1733d434d1a151f2a641924d5ae59d3086397117 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 18 Jan 2016 17:32:25 -0500 Subject: [PATCH 0397/1113] Fix with loop + delegate issues * Don't re-use the existing connection if the remote_addr field of the play context has changed * When overriding variables in PlayContext (from task/variables), don't set the same attribute based on a different variable name if we had already previously 
set it from another variable name Fixes #13880 --- lib/ansible/executor/task_executor.py | 2 +- lib/ansible/playbook/play_context.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5f9cccae4c2..1417bc9d2c4 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -367,7 +367,7 @@ class TaskExecutor: self._task.args = variable_params # get the connection and the handler for this execution - if not self._connection or not getattr(self._connection, 'connected', False): + if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr: self._connection = self._get_connection(variables=variables, templar=templar) self._connection.set_host_overrides(host=self._host) else: diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 6b19f4c1723..409f9661b8a 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -366,12 +366,17 @@ class PlayContext(Base): else: delegated_vars = dict() + attrs_considered = [] for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING): for variable_name in variable_names: + if attr in attrs_considered: + continue if isinstance(delegated_vars, dict) and variable_name in delegated_vars: setattr(new_info, attr, delegated_vars[variable_name]) + attrs_considered.append(attr) elif variable_name in variables: setattr(new_info, attr, variables[variable_name]) + attrs_considered.append(attr) # make sure we get port defaults if needed if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None: From 5e18bc595544d8abd1854f81af8a875307415d07 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 18 Jan 2016 14:42:50 -0800 Subject: [PATCH 0398/1113] Turn results that come from traceback messages into unicode, not str. 
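For illustration, the failure mode avoided here is the implicit ASCII encode that happens on Python 2 when str() is applied to an exception carrying non-ASCII text; a rough sketch with a made-up message:

    # Python 2 sketch, hypothetical message text
    e = Exception(u'Datei nicht gefunden: \xfc')
    str(e)      # raises UnicodeEncodeError ('ascii' codec can't encode ...)
    unicode(e)  # returns the message intact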
Fixes #13964 Fixes #13967 --- lib/ansible/plugins/strategy/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 00744dbf730..0b7f48b5631 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -39,6 +39,7 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar +from ansible.utils.unicode import to_unicode from ansible.vars.unsafe_proxy import wrap_var from ansible.vars import combine_vars @@ -465,7 +466,7 @@ class StrategyBase: # mark all of the hosts including this file as failed, send callbacks, # and increment the stats for this host for host in included_file._hosts: - tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e))) + tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e))) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) From 5dd2aad535f8ea24a27df279d596b6c8a0e5d077 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 18 Jan 2016 17:58:45 -0500 Subject: [PATCH 0399/1113] ignore exceptions in get_file_contents it should be common enough to not be able to read files in some jailed/container environments even though permissions tell us otherwise --- lib/ansible/module_utils/facts.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index a214b6bb30d..c5dc1f9ac80 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2987,6 +2987,9 @@ def get_file_content(path, default=None, strip=True): data = data.strip() if len(data) == 0: data = default + except: + # todo: issue warning about unreadable file? 
+ pass finally: datafile.close() return data From 54435261e880b4723daca8c3a997dadd1ff319d8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 18 Jan 2016 21:14:33 -0500 Subject: [PATCH 0400/1113] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 51c3aa42673..ffea58ee86d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 51c3aa42673c3661e559e29bcad470f5e1556f2f +Subproject commit ffea58ee86dbee20dc272c74cd5f8e02f6f317e6 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f798240f436..e9450df8786 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f798240f436a16a828f48759bbd176b6bccdfe75 +Subproject commit e9450df878632531fae574b5eaf28bf0f7916948 From 9f05ce3e2b63e69fa865f157e4334bb1d731d76a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 18 Jan 2016 21:55:24 -0500 Subject: [PATCH 0401/1113] be consistent about conversion to_str --- lib/ansible/errors/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index 2185a83e4c0..ffe6c9013db 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -56,7 +56,7 @@ class AnsibleError(Exception): if extended_error: self.message = 'ERROR! %s\n\n%s' % (to_str(message), to_str(extended_error)) else: - self.message = 'ERROR! %s' % message + self.message = 'ERROR! %s' % to_str(message) def __str__(self): return self.message From d09c9f526b96fedbabc79e998bc9f71ef42edbe5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 18 Jan 2016 22:06:05 -0500 Subject: [PATCH 0402/1113] added missed ec2_vpc_net_facts --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b5b4ac0be7..e7cab6322a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -129,6 +129,7 @@ allowed in future versions: * amazon: ec2_remote_facts * amazon: ec2_vpc_igw * amazon: ec2_vpc_net +* amazon: ec2_vpc_net_facts * amazon: ec2_vpc_route_table * amazon: ec2_vpc_route_table_facts * amazon: ec2_vpc_subnet From 1f7492171e0a8eb0ee0e54ea9f3453d1f46e787b Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 18 Jan 2016 22:47:42 -0500 Subject: [PATCH 0403/1113] Don't clear start at task flag until all hosts are advanced Clearing the flag after the first host was advanced caused all other hosts to not advance at all. 
Fixes #13864 --- lib/ansible/executor/play_iterator.py | 51 +++++++++++++++------------ 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 3de07ec70c8..584cfb0fe67 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -137,30 +137,35 @@ class PlayIterator: self._blocks.append(new_block) self._host_states = {} + start_at_matched = False for host in inventory.get_hosts(self._play.hosts): - self._host_states[host.name] = HostState(blocks=self._blocks) - # if the host's name is in the variable manager's fact cache, then set - # its _gathered_facts flag to true for smart gathering tests later - if host.name in variable_manager._fact_cache: - host._gathered_facts = True - # if we're looking to start at a specific task, iterate through - # the tasks for this host until we find the specified task - if play_context.start_at_task is not None and not start_at_done: - while True: - (s, task) = self.get_next_task_for_host(host, peek=True) - if s.run_state == self.ITERATING_COMPLETE: - break - if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \ - task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task): - # we have our match, so clear the start_at_task field on the - # play context to flag that we've started at a task (and future - # plays won't try to advance) - play_context.start_at_task = None - break - else: - self.get_next_task_for_host(host) - # finally, reset the host's state to ITERATING_SETUP - self._host_states[host.name].run_state = self.ITERATING_SETUP + self._host_states[host.name] = HostState(blocks=self._blocks) + # if the host's name is in the variable manager's fact cache, then set + # its _gathered_facts flag to true for smart gathering tests later + if host.name in variable_manager._fact_cache: + host._gathered_facts = True + # if we're looking to start at a specific task, iterate through + # the tasks for this host until we find the specified task + if play_context.start_at_task is not None and not start_at_done: + while True: + (s, task) = self.get_next_task_for_host(host, peek=True) + if s.run_state == self.ITERATING_COMPLETE: + break + if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \ + task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task): + start_at_matched = True + break + else: + self.get_next_task_for_host(host) + + # finally, reset the host's state to ITERATING_SETUP + self._host_states[host.name].run_state = self.ITERATING_SETUP + + if start_at_matched: + # we have our match, so clear the start_at_task field on the + # play context to flag that we've started at a task (and future + # plays won't try to advance) + play_context.start_at_task = None # Extend the play handlers list to include the handlers defined in roles self._play.handlers.extend(play.compile_roles_handlers()) From a773486432ae03fac53a5e176ba46d0d187762b8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 19 Jan 2016 08:31:10 -0500 Subject: [PATCH 0404/1113] fixed exception handling to be 2.4 compatible previous 'fix' broke on 2.4 --- lib/ansible/module_utils/facts.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 
c5dc1f9ac80..3cb66a83e8c 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2981,17 +2981,19 @@ def get_file_content(path, default=None, strip=True): data = default if os.path.exists(path) and os.access(path, os.R_OK): try: - datafile = open(path) - data = datafile.read() - if strip: - data = data.strip() - if len(data) == 0: - data = default + try: + datafile = open(path) + data = datafile.read() + if strip: + data = data.strip() + if len(data) == 0: + data = default + finally: + datafile.close() except: - # todo: issue warning about unreadable file? + # ignore errors as some jails/containers might have readable permissions but not allow reads to proc + # done in 2 blocks for 2.4 compat pass - finally: - datafile.close() return data def get_file_lines(path): From 742bd2c5542dba30cc3f60aeae81c67bb2685564 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 19 Jan 2016 08:48:55 -0500 Subject: [PATCH 0405/1113] clarify role include paths --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 76bff6666c0..73c9710f519 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -191,7 +191,7 @@ This designates the following behaviors, for each role 'x': - If roles/x/handlers/main.yml exists, handlers listed therein will be added to the play - If roles/x/vars/main.yml exists, variables listed therein will be added to the play - If roles/x/meta/main.yml exists, any role dependencies listed therein will be added to the list of roles (1.3 and later) -- Any copy, script, template or include tasks (in the role) can reference files in roles/x/files/ without having to path them relatively or absolutely +- Any copy, script, template or include tasks (in the role) can reference files in roles/x/{files,templates,tasks}/ (dir depends on task) without having to path them relatively or absolutely In Ansible 1.4 and later you can configure a roles_path to search for roles. Use this to check all of your common roles out to one location, and share them easily between multiple playbook projects. See :doc:`intro_configuration` for details about how to set this up in ansible.cfg. From 40373dea4d01e4e8899cf8036ce01a1229d62247 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 19 Jan 2016 05:25:21 -0800 Subject: [PATCH 0406/1113] Make all parts of messages and pathnames into unicode so that we don't get UnicodeError tracebacks. Note that the fix for display normalizing to unicode is correct but the fix for pathnames is probably not. Changing pathnames to unicode type means that we will handle utf8 pathnames fine but pathnames can be any sequence of bytes that do not contain null. We do not handle sequences of bytes that are not valid utf8 here. To do that we need to revamp the handling of basedir and paths to transform to bytes instead of unicode. Didn't want to do that in 2.0.x as it will potentially introduce other bugs as we find all the places that we combine basedir with other path elements. Since no one has raised that as an issue thus far so it's not something we need to handle yet. But it's something to keep in mind for the future. To test utf8 handling, create a utf8 directory and run a playbook from within there. To test non-utf8 handling (currently doesn't work as stated above), create a directory with non-utf8 chars an run a playbook from there. 
In bash, create that directory like this: mkdir $'\377' Fixes #13937 --- lib/ansible/cli/__init__.py | 6 +++--- lib/ansible/inventory/__init__.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 77aa6a55497..12ba8f89004 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -32,7 +32,7 @@ import subprocess from ansible import __version__ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError -from ansible.utils.unicode import to_bytes +from ansible.utils.unicode import to_bytes, to_unicode try: from __main__ import display @@ -105,9 +105,9 @@ class CLI(object): if self.options.verbosity > 0: if C.CONFIG_FILE: - display.display("Using %s as config file" % C.CONFIG_FILE) + display.display(u"Using %s as config file" % to_unicode(C.CONFIG_FILE)) else: - display.display("No config file found; using defaults") + display.display(u"No config file found; using defaults") @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 5885d28014c..0184794fc01 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -735,11 +735,11 @@ class Inventory(object): if group and host is None: # load vars in dir/group_vars/name_of_group - base_path = os.path.realpath(os.path.join(basedir, "group_vars/%s" % group.name)) + base_path = os.path.realpath(os.path.join(to_unicode(basedir), "group_vars/%s" % group.name)) results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader)) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host - base_path = os.path.realpath(os.path.join(basedir, "host_vars/%s" % host.name)) + base_path = os.path.realpath(os.path.join(to_unicode(basedir), "host_vars/%s" % host.name)) results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader)) # all done, results is a dictionary of variables for this particular host. From 94a9ed0ee1189c0eb59f3fc09e47b2b77aa92970 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 19 Jan 2016 05:48:12 -0800 Subject: [PATCH 0407/1113] Set decoding of path to unicode to raise an exception if non-utf8 Letting it pass would just cause an error later on (no such file found) so it's better to catch it here and know that we have users dealing with non-utf8 pathnames than to have to track it down from later on. 
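For illustration only (this sketch is not part of the patch), the difference between a lenient and a strict decode is what moves the failure to this earlier, more obvious point; plain Python shown, using a byte path like the mkdir $'\377' example above:

    basedir = b'/playbooks/\xff'
    basedir.decode('utf-8', 'replace')  # succeeds, but yields a mangled path
    basedir.decode('utf-8', 'strict')   # raises UnicodeDecodeError right away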
--- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 0184794fc01..eb8d1905502 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -735,11 +735,11 @@ class Inventory(object): if group and host is None: # load vars in dir/group_vars/name_of_group - base_path = os.path.realpath(os.path.join(to_unicode(basedir), "group_vars/%s" % group.name)) + base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name)) results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader)) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host - base_path = os.path.realpath(os.path.join(to_unicode(basedir), "host_vars/%s" % host.name)) + base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name)) results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader)) # all done, results is a dictionary of variables for this particular host. From 1af473548b2c38e15940257bc22952246a3a64a8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 11:02:15 -0500 Subject: [PATCH 0408/1113] Fix role hashing failure/traceback when params contain lists Fixes #13857 --- lib/ansible/playbook/role/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index ce82573dc03..10e14b4ac38 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -43,7 +43,10 @@ __all__ = ['Role', 'hash_params'] # strategies (ansible/plugins/strategy/__init__.py) def hash_params(params): if not isinstance(params, dict): - return params + if isinstance(params, list): + return frozenset(params) + else: + return params else: s = set() for k,v in iteritems(params): From a7dd425620e0a40911908a41e1b378183ae690f4 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 12:07:45 -0500 Subject: [PATCH 0409/1113] Catch INI section parsing misses and raise an appropriate error Fixes #13917 --- lib/ansible/inventory/ini.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 9224ef2d23d..4d43977004d 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,7 +23,7 @@ import ast import re from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError +from ansible.errors import AnsibleError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range @@ -124,6 +124,9 @@ class InventoryParser(object): del pending_declarations[groupname] continue + elif line.startswith('['): + self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + \ + "in the section entry, and that there are no other invalid characters") # It's not a section, so the current state tells us what kind of # definition it must be. 
The individual parsers will raise an From b1223746cdb75827093e1d96115047aa0b5f343f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 12:09:04 -0500 Subject: [PATCH 0410/1113] Relocate use of ERROR to display class, to avoid doubling up --- lib/ansible/errors/__init__.py | 4 ++-- lib/ansible/utils/display.py | 2 +- test/units/errors/test_errors.py | 16 ++++++++-------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index ffe6c9013db..faf7c33416d 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -54,9 +54,9 @@ class AnsibleError(Exception): if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error: - self.message = 'ERROR! %s\n\n%s' % (to_str(message), to_str(extended_error)) + self.message = '%s\n\n%s' % (to_str(message), to_str(extended_error)) else: - self.message = 'ERROR! %s' % to_str(message) + self.message = '%s' % to_str(message) def __str__(self): return self.message diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 57cee14ffe3..3703c15540b 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -261,7 +261,7 @@ class Display: wrapped = textwrap.wrap(new_msg, self.columns) new_msg = u"\n".join(wrapped) + u"\n" else: - new_msg = msg + new_msg = u"ERROR! " + msg if new_msg not in self._errors: self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 diff --git a/test/units/errors/test_errors.py b/test/units/errors/test_errors.py index 4c09c0089b3..4480bf01df9 100644 --- a/test/units/errors/test_errors.py +++ b/test/units/errors/test_errors.py @@ -40,13 +40,13 @@ class TestErrors(unittest.TestCase): def test_basic_error(self): e = AnsibleError(self.message) - self.assertEqual(e.message, 'ERROR! ' + self.message) - self.assertEqual(e.__repr__(), 'ERROR! ' + self.message) + self.assertEqual(e.message, self.message) + self.assertEqual(e.__repr__(), self.message) def test_basic_unicode_error(self): e = AnsibleError(self.unicode_message) - self.assertEqual(e.message, 'ERROR! ' + self.unicode_message) - self.assertEqual(e.__repr__(), 'ERROR! ' + self.unicode_message) + self.assertEqual(e.message, self.unicode_message) + self.assertEqual(e.__repr__(), self.unicode_message) @patch.object(AnsibleError, '_get_error_lines_from_file') def test_error_with_object(self, mock_method): @@ -55,7 +55,7 @@ class TestErrors(unittest.TestCase): mock_method.return_value = ('this is line 1\n', '') e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") + self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") def test_get_error_lines_from_file(self): m = mock_open() @@ -65,12 +65,12 @@ class TestErrors(unittest.TestCase): # this line will be found in the file self.obj.ansible_pos = ('foo.yml', 1, 1) e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "ERROR! 
This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") + self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") # this line will not be found, as it is out of the index range self.obj.ansible_pos = ('foo.yml', 2, 1) e = AnsibleError(self.message, self.obj) - self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") + self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") m = mock_open() m.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n'] @@ -79,5 +79,5 @@ class TestErrors(unittest.TestCase): # this line will be found in the file self.obj.ansible_pos = ('foo.yml', 1, 1) e = AnsibleError(self.unicode_message, self.obj) - self.assertEqual(e.message, "ERROR! This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ here\n") + self.assertEqual(e.message, "This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ here\n") From 1b46a422aa52a90c9dd7b168be1fed9c4273b6b2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 14:12:27 -0500 Subject: [PATCH 0411/1113] Properly look for parent become attribute Corrects inheritence of the boolean value, which needs some special consideration from other (string/int) values. Fixes #13872 --- lib/ansible/playbook/become.py | 7 ++++--- lib/ansible/playbook/task.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index 1e579751d46..cf13e2d1c3a 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -106,10 +106,11 @@ class Become: ''' Override for the 'become' getattr fetcher, used from Base. ''' + value = self._attributes['become'] if hasattr(self, '_get_parent_attribute'): - return self._get_parent_attribute('become') - else: - return self._attributes['become'] + if self._get_parent_attribute('become'): + return True + return value def _get_attr_become_method(self): ''' diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 154ff53d5e3..f623f0566fe 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -417,7 +417,7 @@ class Task(Base, Conditional, Taggable, Become): def _get_attr_any_errors_fatal(self): ''' - Override for the 'tags' getattr fetcher, used from Base. 
+ Override for the 'any_errors_fatal' getattr fetcher, used from Base. ''' any_errors_fatal = self._attributes['any_errors_fatal'] if hasattr(self, '_get_parent_attribute'): From 5144ee226ead9e77120e48b2d898c3200ae5d73c Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 14:05:29 -0500 Subject: [PATCH 0412/1113] adds private key file support to shell shared module This commit provides an argument to provide a path to the private key file. This will allow paramiko to use the key file as opposed to only username / password combinations for CLI connections. --- lib/ansible/module_utils/shell.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index 0107911ba02..13506c43226 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -31,7 +31,7 @@ except ImportError: ANSI_RE = re.compile(r'(\x1b\[\?1h\x1b=)') CLI_PROMPTS_RE = [ - re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#](?:\s*)$'), + re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#|%](?:\s*)$'), re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$') ] @@ -84,15 +84,18 @@ class Shell(object): self.errors.extend(CLI_ERRORS_RE) def open(self, host, port=22, username=None, password=None, - timeout=10, key_filename=None): + timeout=10, key_filename=None, pkey=None, look_for_keys=None): self.ssh = paramiko.SSHClient() self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - use_keys = password is None + # unless explicitly set, disable look for keys if a password is + # present. this changes the default search order paramiko implements + if not look_for_keys: + look_for_keys = password is None self.ssh.connect(host, port=port, username=username, password=password, - timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys, + timeout=timeout, look_for_keys=look_for_keys, pkey=pkey, key_filename=key_filename) self.shell = self.ssh.invoke_shell() From 33d390fb5884af976b72023867051f5e4daf18aa Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 12:57:14 -0500 Subject: [PATCH 0413/1113] adds provider argument to junos shared module This commit adds a new argument `provider` to the junos shared module. The argument allows the set of common connection args to be passed to the junos shared module. 
This commit also updates the junos doc fragment --- lib/ansible/module_utils/junos.py | 42 +++++++++---------- .../utils/module_docs_fragments/junos.py | 7 ++++ 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/lib/ansible/module_utils/junos.py b/lib/ansible/module_utils/junos.py index fa3104101e4..33af9266e72 100644 --- a/lib/ansible/module_utils/junos.py +++ b/lib/ansible/module_utils/junos.py @@ -21,7 +21,8 @@ NET_COMMON_ARGS = dict( host=dict(required=True), port=dict(default=22, type='int'), username=dict(required=True), - password=dict(no_log=True) + password=dict(no_log=True), + provider=dict() ) def to_list(val): @@ -52,10 +53,10 @@ class Cli(object): return self.shell.send(commands) -class JunosModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(JunosModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None @@ -65,14 +66,19 @@ class JunosModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): - try: - self.connection = Cli(self) - self.connection.connect() - self.execute('cli') - self.execute('set cli screen-length 0') - except ShellErrror, exc: - self.fail_json(msg=str(exc)) + self.connection = Cli(self) + self.connection.connect() + self.execute('cli') + self.execute('set cli screen-length 0') def configure(self, commands): commands = to_list(commands) @@ -84,10 +90,7 @@ class JunosModule(AnsibleModule): return responses def execute(self, commands, **kwargs): - try: - return self.connection.send(commands) - except ShellError, exc: - self.fail_json(msg=exc.message) + return self.connection.send(commands) def disconnect(self): self.connection.close() @@ -100,27 +103,20 @@ class JunosModule(AnsibleModule): return self.execute(cmd)[0] def get_module(**kwargs): - """Return instance of JunosModule + """Return instance of NetworkModule """ - argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec kwargs['check_invalid_arguments'] = False - module = JunosModule(**kwargs) + module = NetworkModule(**kwargs) # HAS_PARAMIKO is set by module_utils/shell.py if not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. - params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - module.connect() - return module diff --git a/lib/ansible/utils/module_docs_fragments/junos.py b/lib/ansible/utils/module_docs_fragments/junos.py index d7edb02da7f..96627288ca7 100644 --- a/lib/ansible/utils/module_docs_fragments/junos.py +++ b/lib/ansible/utils/module_docs_fragments/junos.py @@ -48,5 +48,12 @@ options: the SSH session required: false default: null + provider: + description: + - Convience method that allows all M(ios) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. 
+ required: false + default: null """ From 7640eca368171a755318f25c9b3f3ef134f141bf Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 11:25:24 -0500 Subject: [PATCH 0414/1113] adds provider argument to openswitch shared module Adds new argument `provider` to the openswitch shared module. The provider argument can pass all openswitch connection arguments as a dict object. This update includes adding the provider argument to the openswitch doc fragment --- lib/ansible/module_utils/openswitch.py | 29 ++++++++++--------- .../utils/module_docs_fragments/openswitch.py | 16 ++++++++++ 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/lib/ansible/module_utils/openswitch.py b/lib/ansible/module_utils/openswitch.py index 9ff7450ee74..ba3eb7b44ab 100644 --- a/lib/ansible/module_utils/openswitch.py +++ b/lib/ansible/module_utils/openswitch.py @@ -35,7 +35,9 @@ NET_COMMON_ARGS = dict( port=dict(type='int'), username=dict(), password=dict(no_log=True), + use_ssl=dict(default=True, type='int'), transport=dict(default='ssh', choices=['ssh', 'cli', 'rest']), + provider=dict() ) def to_list(val): @@ -150,10 +152,10 @@ class Cli(object): def send(self, commands, encoding='text'): return self.shell.send(commands) -class OpsModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(OpsModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None self._runconfig = None @@ -164,16 +166,21 @@ class OpsModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): if self.params['transport'] == 'rest': self.connection = Rest(self) elif self.params['transport'] == 'cli': self.connection = Cli(self) - try: - self.connection.connect() - except Exception, exc: - self.fail_json(msg=exc.message) + self.connection.connect() def configure(self, config): if self.params['transport'] == 'cli': @@ -217,15 +224,14 @@ class OpsModule(AnsibleModule): def get_module(**kwargs): - """Return instance of OpsModule + """Return instance of NetworkModule """ argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec - kwargs['check_invalid_arguments'] = False - module = OpsModule(**kwargs) + module = NetworkModule(**kwargs) if not HAS_OPS and module.params['transport'] == 'ssh': module.fail_json(msg='could not import ops library') @@ -234,11 +240,6 @@ def get_module(**kwargs): if module.params['transport'] == 'cli' and not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. 
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - if module.params['transport'] in ['cli', 'rest']: module.connect() diff --git a/lib/ansible/utils/module_docs_fragments/openswitch.py b/lib/ansible/utils/module_docs_fragments/openswitch.py index 1427fc75253..3b3dbcaeccb 100644 --- a/lib/ansible/utils/module_docs_fragments/openswitch.py +++ b/lib/ansible/utils/module_docs_fragments/openswitch.py @@ -62,5 +62,21 @@ options: required: true default: ssh choices: ['ssh', 'cli', 'rest'] + use_ssl: + description: + - Configures the I(transport) to use SSL if set to true only when the + I(transport) argument is configured as rest. If the transport + argument is not rest, this value is ignored + required: false + default: true + choices: BOOLEANS + provider: + description: + - Convience method that allows all M(openswitch) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + required: false + default: null + """ From 0f2917fde32e74eac10053605a5885527b2ee368 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 12:27:19 -0500 Subject: [PATCH 0415/1113] add provider to iosxr shared module This commit adds a new argument `provider` to the iosxr shared module that allows common connection parameters to be passed as a dict object. The constraints on the args still applies. This commit also updates the iosxr doc fragment. --- lib/ansible/module_utils/iosxr.py | 27 ++++++++++--------- .../utils/module_docs_fragments/iosxr.py | 7 +++++ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/iosxr.py b/lib/ansible/module_utils/iosxr.py index 9686adc7f5f..7ca360c5efd 100644 --- a/lib/ansible/module_utils/iosxr.py +++ b/lib/ansible/module_utils/iosxr.py @@ -23,7 +23,8 @@ NET_COMMON_ARGS = dict( host=dict(required=True), port=dict(default=22, type='int'), username=dict(required=True), - password=dict(no_log=True) + password=dict(no_log=True), + provider=dict() ) def to_list(val): @@ -53,10 +54,10 @@ class Cli(object): def send(self, commands): return self.shell.send(commands) -class IosxrModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(IosxrModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None @@ -66,6 +67,14 @@ class IosxrModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): try: self.connection = Cli(self) @@ -96,26 +105,18 @@ class IosxrModule(AnsibleModule): return self.execute('show running-config')[0] def get_module(**kwargs): - """Return instance of IosxrModule + """Return instance of NetworkModule """ - argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec - kwargs['check_invalid_arguments'] = False - module = IosxrModule(**kwargs) + module = NetworkModule(**kwargs) if not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. 
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - module.connect() - return module diff --git a/lib/ansible/utils/module_docs_fragments/iosxr.py b/lib/ansible/utils/module_docs_fragments/iosxr.py index cb9ba28fc2a..3b9959db47c 100644 --- a/lib/ansible/utils/module_docs_fragments/iosxr.py +++ b/lib/ansible/utils/module_docs_fragments/iosxr.py @@ -48,5 +48,12 @@ options: the SSH session required: false default: null + provider: + description: + - Convience method that allows all M(iosxr) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + required: false + default: null """ From ca8261ed317fea7415828ab8a80f44c3f3d15d9c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 19 Jan 2016 14:06:56 -0800 Subject: [PATCH 0416/1113] Don't tracback trying to retore settings Can occur if we exit the timeout before the settings were changed --- lib/ansible/plugins/action/pause.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py index f3a70ed7e35..97fa9ac3207 100644 --- a/lib/ansible/plugins/action/pause.py +++ b/lib/ansible/plugins/action/pause.py @@ -105,6 +105,8 @@ class ActionModule(ActionBase): result['start'] = str(datetime.datetime.now()) result['user_input'] = '' + fd = None + old_settings = None try: if seconds is not None: # setup the alarm handler @@ -159,7 +161,7 @@ class ActionModule(ActionBase): finally: # cleanup and save some information # restore the old settings for the duped stdin fd - if isatty(fd): + if not(None in (fd, old_settings)) and isatty(fd): termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) duration = time.time() - start From d07d974ad83d0e8f80c826ac254f0425c917562d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 18:17:42 -0500 Subject: [PATCH 0417/1113] Revert "Properly look for parent become attribute" This reverts commit 1b46a422aa52a90c9dd7b168be1fed9c4273b6b2. --- lib/ansible/playbook/become.py | 7 +++---- lib/ansible/playbook/task.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index cf13e2d1c3a..1e579751d46 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -106,11 +106,10 @@ class Become: ''' Override for the 'become' getattr fetcher, used from Base. ''' - value = self._attributes['become'] if hasattr(self, '_get_parent_attribute'): - if self._get_parent_attribute('become'): - return True - return value + return self._get_parent_attribute('become') + else: + return self._attributes['become'] def _get_attr_become_method(self): ''' diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index f623f0566fe..154ff53d5e3 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -417,7 +417,7 @@ class Task(Base, Conditional, Taggable, Become): def _get_attr_any_errors_fatal(self): ''' - Override for the 'any_errors_fatal' getattr fetcher, used from Base. + Override for the 'tags' getattr fetcher, used from Base. 
''' any_errors_fatal = self._attributes['any_errors_fatal'] if hasattr(self, '_get_parent_attribute'): From e2ff26a5cfbc7d12686dbbb29f44109676833a5d Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 07:26:19 -0500 Subject: [PATCH 0418/1113] add provider argument to eos shared module Adds a new argument `provider` to the eos shared module and updates the eos doc fragment. This commit includes some additional minor fixes and code refactors for naming conventions. The `provider` argument allows the shared module arguments to be passed as a dict object instead of having to pass each argument invididually. --- lib/ansible/module_utils/eos.py | 36 ++++++++++++------- .../utils/module_docs_fragments/eos.py | 7 ++++ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index d4656b11915..a89869dced4 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -26,7 +26,8 @@ NET_COMMON_ARGS = dict( authorize=dict(default=False, type='bool'), auth_pass=dict(no_log=True), transport=dict(choices=['cli', 'eapi']), - use_ssl=dict(default=True, type='bool') + use_ssl=dict(default=True, type='bool'), + provider=dict() ) def to_list(val): @@ -129,10 +130,10 @@ class Cli(object): def send(self, commands, encoding='text'): return self.shell.send(commands) -class EosModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(EosModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None @@ -142,6 +143,14 @@ class EosModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): if self.params['transport'] == 'eapi': self.connection = Eapi(self) @@ -163,8 +172,18 @@ class EosModule(AnsibleModule): commands.insert(0, 'configure terminal') responses = self.execute(commands) responses.pop(0) + return responses + def config_replace(self, commands): + if self.params['transport'] == 'cli': + self.fail_json(msg='config replace only supported over eapi') + + cmd = 'configure replace terminal:' + commands = '\n'.join(to_list(commands)) + command = dict(cmd=cmd, input=commands) + self.execute(command) + def execute(self, commands, **kwargs): try: return self.connection.send(commands, **kwargs) @@ -189,26 +208,19 @@ class EosModule(AnsibleModule): def get_module(**kwargs): - """Return instance of EosModule + """Return instance of NetworkModule """ - argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec - kwargs['check_invalid_arguments'] = False - module = EosModule(**kwargs) + module = NetworkModule(**kwargs) # HAS_PARAMIKO is set by module_utils/shell.py if module.params['transport'] == 'cli' and not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. 
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - module.connect() return module diff --git a/lib/ansible/utils/module_docs_fragments/eos.py b/lib/ansible/utils/module_docs_fragments/eos.py index 7cca8b2a781..bd8d3f510ed 100644 --- a/lib/ansible/utils/module_docs_fragments/eos.py +++ b/lib/ansible/utils/module_docs_fragments/eos.py @@ -80,5 +80,12 @@ options: required: false default: true choices: BOOLEANS + provider: + description: + - Convience method that allows all M(eos) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + required: false + default: null """ From 9cba1a7c697f57cdc9736aaa9283032d76ea90f3 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 07:35:16 -0500 Subject: [PATCH 0419/1113] adds provider argument to ios shared module New argument `provider` added to the ios shared module that provides the ability to pass all of the common ios arguments as a dict. This commit includes some minor bugfixes and refactoring of names. It also includes udpates to the ios documentation fragment for the new argument --- lib/ansible/module_utils/ios.py | 25 ++++++++++--------- .../utils/module_docs_fragments/ios.py | 7 ++++++ 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index f6d6037f808..95937ca2191 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -26,6 +26,7 @@ NET_COMMON_ARGS = dict( password=dict(no_log=True), authorize=dict(default=False, type='bool'), auth_pass=dict(no_log=True), + provider=dict() ) def to_list(val): @@ -59,10 +60,10 @@ class Cli(object): def send(self, commands): return self.shell.send(commands) -class IosModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(IosModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None @@ -72,6 +73,14 @@ class IosModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): try: self.connection = Cli(self) @@ -107,27 +116,19 @@ class IosModule(AnsibleModule): return self.execute(cmd)[0] def get_module(**kwargs): - """Return instance of IosModule + """Return instance of NetworkModule """ - argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec - kwargs['check_invalid_arguments'] = False - module = IosModule(**kwargs) + module = NetworkModule(**kwargs) # HAS_PARAMIKO is set by module_utils/shell.py if not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. 
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - module.connect() - return module diff --git a/lib/ansible/utils/module_docs_fragments/ios.py b/lib/ansible/utils/module_docs_fragments/ios.py index 5f07bbfde76..66ba28ad021 100644 --- a/lib/ansible/utils/module_docs_fragments/ios.py +++ b/lib/ansible/utils/module_docs_fragments/ios.py @@ -63,5 +63,12 @@ options: does nothing required: false default: none + provider: + description: + - Convience method that allows all M(ios) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + required: false + default: null """ From 981265ac8435407d04e143806a561186c4ef9150 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 08:38:38 -0500 Subject: [PATCH 0420/1113] adds provider argument to nxos shared module The provider argument accepts the set of device common arguments as a dict object. Individual connection arguments can still be included and take priority over the provider argument. This update includes additions to the nxos doc fragment --- lib/ansible/module_utils/nxos.py | 27 ++++++++++--------- .../utils/module_docs_fragments/nxos.py | 7 +++++ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index 41e4269ade3..d8eb0f97de4 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -24,7 +24,8 @@ NET_COMMON_ARGS = dict( username=dict(required=True), password=dict(no_log=True), transport=dict(choices=['cli', 'nxapi']), - use_ssl=dict(default=False, type='bool') + use_ssl=dict(default=False, type='bool'), + provider=dict() ) NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] @@ -131,10 +132,10 @@ class Cli(object): def send(self, commands, encoding='text'): return self.shell.send(commands) -class NxosModule(AnsibleModule): +class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): - super(NxosModule, self).__init__(*args, **kwargs) + super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None @@ -144,6 +145,14 @@ class NxosModule(AnsibleModule): self._config = self.get_config() return self._config + def _load_params(self): + params = super(NetworkModule, self)._load_params() + provider = params.get('provider') or dict() + for key, value in provider.items(): + if key in NET_COMMON_ARGS.keys(): + params[key] = value + return params + def connect(self): if self.params['transport'] == 'nxapi': self.connection = Nxapi(self) @@ -191,26 +200,18 @@ class NxosModule(AnsibleModule): return resp['ins_api']['outputs']['output']['body'] def get_module(**kwargs): - """Return instance of NxosModule + """Return instance of NetworkModule """ - argument_spec = NET_COMMON_ARGS.copy() if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) kwargs['argument_spec'] = argument_spec - kwargs['check_invalid_arguments'] = False - module = NxosModule(**kwargs) + module = NetworkModule(**kwargs) # HAS_PARAMIKO is set by module_utils/shell.py if module.params['transport'] == 'cli' and not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - # copy in values from local action. 
- params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - for key, value in params.iteritems(): - module.params[key] = value - module.connect() - return module diff --git a/lib/ansible/utils/module_docs_fragments/nxos.py b/lib/ansible/utils/module_docs_fragments/nxos.py index 37d287ea722..26312155c40 100644 --- a/lib/ansible/utils/module_docs_fragments/nxos.py +++ b/lib/ansible/utils/module_docs_fragments/nxos.py @@ -65,5 +65,12 @@ options: required: false default: false choices: BOOLEANS + provider: + description: + - Convience method that allows all M(nxos) arguments to be passed as + a dict object. All constraints (required, choices, etc) must be + met either by individual arguments or values in this dict. + required: false + default: null """ From 3750af45d42daf09b3b08c30b2e81222588fc071 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 19 Jan 2016 21:20:10 -0500 Subject: [PATCH 0421/1113] clarified with_file --- docsite/rst/playbooks_loops.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 6773e90789d..6f14922deaf 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -96,7 +96,7 @@ And you want to print every user's name and phone number. You can loop through Looping over Files `````````````````` -``with_file`` iterates over a list of files, setting `item` to the content of each file in sequence. It can be used like this:: +``with_file`` iterates over the content of a list of files, `item` will be set to the content of each file in sequence. It can be used like this:: --- - hosts: all From c09c01a1f523da3b6f1f5597f51f5fc446f5c1df Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 19 Jan 2016 18:37:59 -0500 Subject: [PATCH 0422/1113] go back to defaulting wrapping commands in shell this was taken out in an effort to default to the user's shell but creates issues as this is not known ahead of time and its painful to set executable and shell_type for all servers, it should only be needed for those that restrict the user to specific shells and when /bin/sh is not available. raw and command may still bypass this by explicitly passing None. 
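To make the new default concrete, here is a minimal sketch of the wrapping that now happens in _low_level_execute_command; the example command and the '/bin/sh' value (the usual C.DEFAULT_EXECUTABLE) are illustrative placeholders, not taken from a real play:

    import pipes  # shlex.quote serves the same purpose on Python 3

    executable = '/bin/sh'                     # assumed value of C.DEFAULT_EXECUTABLE
    cmd = "echo $HOME && ls -la"               # placeholder remote command
    wrapped = executable + ' -c ' + pipes.quote(cmd)
    # wrapped is now: /bin/sh -c 'echo $HOME && ls -la'

Passing executable=None from raw or command skips this wrapping entirely, as described above.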
fixes #13882 still conditional --- lib/ansible/plugins/action/__init__.py | 5 ++--- test/units/plugins/action/test_action.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 0b33d576c0d..ad305121500 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -481,8 +481,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): display.debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data - def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, - executable=None, encoding_errors='replace'): + def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to @@ -498,7 +497,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): ''' if executable is not None: - cmd = executable + ' -c ' + cmd + cmd = executable + ' -c ' + pipes.quote(cmd) display.debug("_low_level_execute_command(): starting") if not cmd: diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 0e47b6a5381..afb5d767e10 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -49,7 +49,7 @@ class TestActionBase(unittest.TestCase): play_context.remote_user = 'apo' action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) + play_context.make_become_cmd.assert_called_once_with("/bin/sh -c ECHO", executable='/bin/sh') play_context.make_become_cmd.reset_mock() @@ -58,6 +58,6 @@ class TestActionBase(unittest.TestCase): try: play_context.remote_user = 'root' action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) + play_context.make_become_cmd.assert_called_once_with("/bin/sh -c 'ECHO SAME'", executable='/bin/sh') finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From c8bbdd6b399d90c239bca1890c5a7634211496d7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 19 Jan 2016 22:42:27 -0500 Subject: [PATCH 0423/1113] Don't assign both parent blocks and task includes to blocks This causes problems when fetching parent attributes, as the include was being skipped because the parent block would fetch the attribute from the parent play first. 
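As a purely hypothetical illustration (the file name and variable below are placeholders, not taken from the original report), this is the kind of layout in which a block-wrapped include relies on parent attribute lookups resolving through the include rather than jumping straight to the play:

    - hosts: all
      tasks:
        - block:
            - include: inner_tasks.yml          # hypothetical include file
          when: run_inner | default(false)      # evaluated via the parent chain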
Fixes #13872 --- lib/ansible/playbook/block.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index be73c5d8acd..0cf9711403f 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -44,12 +44,17 @@ class Block(Base, Become, Conditional, Taggable): def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False): self._play = play self._role = role - self._task_include = task_include - self._parent_block = parent_block + self._task_include = None + self._parent_block = None self._use_handlers = use_handlers self._implicit = implicit self._dep_chain = [] + if task_include: + self._task_include = task_include + elif parent_block: + self._parent_block = parent_block + super(Block, self).__init__() def get_vars(self): From e3a6accc1d25298c981bdef4e5412e3573730d0c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 20 Jan 2016 01:32:45 -0500 Subject: [PATCH 0424/1113] Forward conditionals onto included plays when conditional eval errors When using a playbook-level include, we now catch any errors raised during the conditional evaluation step and set a flag to indicate we need to pass those conditionals on to the included play (most likely because they contain inventory variables for evaluation). Fixes #14003 --- lib/ansible/playbook/playbook_include.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index d9af2ba5237..2ce076edb11 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -22,7 +22,7 @@ __metaclass__ = type import os from ansible.compat.six import iteritems -from ansible.errors import AnsibleParserError +from ansible.errors import AnsibleParserError, AnsibleError from ansible.parsing.splitter import split_args, parse_kv from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import FieldAttribute @@ -60,8 +60,15 @@ class PlaybookInclude(Base, Conditional, Taggable): all_vars.update(variable_manager.get_vars(loader=loader)) templar = Templar(loader=loader, variables=all_vars) - if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars): - return None + + try: + forward_conditional = False + if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars): + return None + except AnsibleError: + # conditional evaluation raised an error, so we set a flag to indicate + # we need to forward the conditionals on to the included play(s) + forward_conditional = True # then we use the object to load a Playbook pb = Playbook(loader=loader) @@ -85,6 +92,13 @@ class PlaybookInclude(Base, Conditional, Taggable): if entry._included_path is None: entry._included_path = os.path.dirname(file_name) + # Check to see if we need to forward the conditionals on to the included + # plays. 
If so, we can take a shortcut here and simply prepend them to + # those attached to each block (if any) + if forward_conditional: + for task_block in entry.tasks: + task_block.when = self.when[:] + task_block.when + return pb def preprocess_data(self, ds): From b900c104b9a8db6de9c833ba1fece94802eb4cef Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg@eucalyptus.com> Date: Wed, 20 Jan 2016 10:25:49 -0500 Subject: [PATCH 0425/1113] Adding submodule help --- docsite/rst/developing_test_pr.rst | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index b6ec4e10ba5..af7c4bbb13f 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -81,27 +81,35 @@ and destination repositories. It will look something like this:: Someuser wants to merge 1 commit into ansible:devel from someuser:feature_branch_name .. note:: - It is important that the PR request target be ansible:devel, as we do not accept pull requests into any other branch. - Dot releases are cherry-picked manually by ansible staff. + It is important that the PR request target be ansible:devel, as we do not accept pull requests into any other branch. Dot releases are cherry-picked manually by ansible staff. The username and branch at the end are the important parts, which will be turned into git commands as follows:: git checkout -b testing_PRXXXX devel git pull https://github.com/someuser/ansible.git feature_branch_name -The first command creates and switches to a new branch named testing_PRXXXX, where the XXXX is the actual issue number associated -with the pull request (for example, 1234). This branch is based on the devel branch. The second command pulls the new code from the -users feature branch into the newly created branch. +The first command creates and switches to a new branch named testing_PRXXXX, where the XXXX is the actual issue number associated with the pull request (for example, 1234). This branch is based on the devel branch. The second command pulls the new code from the users feature branch into the newly created branch. .. note:: - If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you - are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of - the original pull request contributor. + If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of the original pull request contributor. .. note:: - Some users do not create feature branches, which can cause problems when they have multiple, un-related commits in - their version of `devel`. If the source looks like `someuser:devel`, make sure there is only one commit listed on - the pull request. + Some users do not create feature branches, which can cause problems when they have multiple, un-related commits in their version of `devel`. If the source looks like `someuser:devel`, make sure there is only one commit listed on the pull request. + +Finding a Pull Request for Ansible Modules +++++++++++++++++++++++++++++++++++++++++++ +Ansible modules are in separate repositories, which are managed as Git submodules. Here's a step by step process for checking out a PR for an Ansible extras module, for instance: + +1. 
git clone https://github.com/ansible/ansible.git +2. cd ansible +3. git submodule init +4. git submodule update --recursive +5. cd lib/ansible/modules/extras +6. git fetch origin pull/1234/head:pr/1234 +7. git checkout pr/1234 +[ DO YOUR TESTING HERE ] +8. cd /path/to/ansible/clone +9. git submodule update --recursive For Those About To Test, We Salute You ++++++++++++++++++++++++++++++++++++++ From a4f91c2c53bc92acf391e0296cab33488865daef Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg.dekoenigsberg@gmail.com> Date: Wed, 20 Jan 2016 10:27:37 -0500 Subject: [PATCH 0426/1113] format fix for github markup --- docsite/rst/developing_test_pr.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index af7c4bbb13f..1a5593ae446 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -107,9 +107,9 @@ Ansible modules are in separate repositories, which are managed as Git submodule 5. cd lib/ansible/modules/extras 6. git fetch origin pull/1234/head:pr/1234 7. git checkout pr/1234 -[ DO YOUR TESTING HERE ] -8. cd /path/to/ansible/clone -9. git submodule update --recursive +8. [ Do your tests here ] +9. cd /path/to/ansible/clone +10. git submodule update --recursive For Those About To Test, We Salute You ++++++++++++++++++++++++++++++++++++++ From a10e15c765c38896183f20d8b93f505afb702bf4 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg.dekoenigsberg@gmail.com> Date: Wed, 20 Jan 2016 10:29:25 -0500 Subject: [PATCH 0427/1113] Update developing_test_pr.rst --- docsite/rst/developing_test_pr.rst | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index 1a5593ae446..cf3d0d7536f 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -103,13 +103,12 @@ Ansible modules are in separate repositories, which are managed as Git submodule 1. git clone https://github.com/ansible/ansible.git 2. cd ansible 3. git submodule init -4. git submodule update --recursive +4. git submodule update --recursive [ fetches the submodules ] 5. cd lib/ansible/modules/extras -6. git fetch origin pull/1234/head:pr/1234 -7. git checkout pr/1234 -8. [ Do your tests here ] -9. cd /path/to/ansible/clone -10. git submodule update --recursive +6. git fetch origin pull/1234/head:pr/1234 [ fetches the specific PR ] +7. git checkout pr/1234 [ do your testing here ] +8. cd /path/to/ansible/clone +9. git submodule update --recursive For Those About To Test, We Salute You ++++++++++++++++++++++++++++++++++++++ From ac89b0de7a0cfa0109c598ed253761f553698cb4 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 20 Jan 2016 12:16:27 -0500 Subject: [PATCH 0428/1113] Fix incorrect handling of any_errors_fatal in the linear strategy Instead of bombing out of the strategy, we now properly mark hosts failed so that the play iterator can handle block rescue/always properly. 
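A hedged example of the behaviour this enables (the play and command below are placeholders, not from the original issue): with any_errors_fatal set, a failing host is now marked failed and still falls through to the block's rescue section instead of the whole strategy aborting outright:

    - hosts: webservers
      any_errors_fatal: true
      tasks:
        - block:
            - command: /usr/bin/flaky_deploy_step   # placeholder command
          rescue:
            - debug:
                msg: "recovering on {{ inventory_hostname }}"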
Fixes #14024 --- lib/ansible/plugins/strategy/linear.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 40c435ca539..61b0c015035 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -342,13 +342,20 @@ class StrategyModule(StrategyBase): display.debug("results queue empty") display.debug("checking for any_errors_fatal") - had_failure = include_failure + failed_hosts = [] for res in results: if res.is_failed() or res.is_unreachable(): - had_failure = True - break - if task and task.any_errors_fatal and had_failure: - return False + failed_hosts.append(res._host.name) + + # if any_errors_fatal and we had an error, mark all hosts as failed + if task and task.any_errors_fatal and len(failed_hosts) > 0: + for host in hosts_left: + # don't double-mark hosts, or the iterator will potentially + # fail them out of the rescue/always states + if host.name not in failed_hosts: + self._tqm._failed_hosts[host.name] = True + iterator.mark_host_failed(host) + display.debug("done checking for any_errors_fatal") except (IOError, EOFError) as e: display.debug("got IOError/EOFError in task loop: %s" % e) From d49b11e9962df4bde4b8f3d61029305af4115748 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Wed, 20 Jan 2016 13:08:16 -0600 Subject: [PATCH 0429/1113] Only use os.path.basename if get_file_content returned a value, and ensure that service_mgr has line endings stripped. Fixes #14026 --- lib/ansible/module_utils/facts.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 3cb66a83e8c..18fa26332bf 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -559,9 +559,11 @@ class Facts(object): # also other OSs other than linux might need to check across several possible candidates # try various forms of querying pid 1 - proc_1 = os.path.basename(get_file_content('/proc/1/comm')) + proc_1 = get_file_content('/proc/1/comm') if proc_1 is None: rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) + else: + proc_1 = os.path.basename(proc_1) if proc_1 == 'init' or proc_1.endswith('sh'): # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container @@ -569,7 +571,7 @@ class Facts(object): # if not init/None it should be an identifiable or custom init, so we are done! if proc_1 is not None: - self.facts['service_mgr'] = proc_1 + self.facts['service_mgr'] = proc_1.strip() # start with the easy ones elif self.facts['distribution'] == 'MacOSX': From a68d90a71af4a799cd6f3bd3c3987432278a567a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 20 Jan 2016 09:04:44 -0800 Subject: [PATCH 0430/1113] rework run_command's env setting to not change os.environ for the rest of the module. New param to run_command to modify the environment for just this invocation. Documentation and comment adjustments. 
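A short, hedged sketch of how a module might use the new parameter (the git command and the LANG/LC_ALL values are only placeholders): the overrides apply to this single invocation and os.environ is restored afterwards:

    # inside module code that already has an AnsibleModule instance named `module`
    rc, out, err = module.run_command(
        ['git', 'rev-parse', 'HEAD'],                  # placeholder command
        environ_update=dict(LANG='C', LC_ALL='C'),     # per-call environment overrides
    )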
--- lib/ansible/module_utils/basic.py | 67 +++++++++++-------- .../module_utils/basic/test_run_command.py | 5 +- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b420f18e6e8..42ea8e79060 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -546,11 +546,10 @@ class AnsibleModule(object): if no_log_object: self.no_log_values.update(return_values(no_log_object)) - # check the locale as set by the current environment, and - # reset to LANG=C if it's an invalid/unavailable locale + # check the locale as set by the current environment, and reset to + # a known valid (LANG=C) if it's an invalid/unavailable locale self._check_locale() - self._check_arguments(check_invalid_arguments) # check exclusive early @@ -1094,7 +1093,6 @@ class AnsibleModule(object): # as it would be returned by locale.getdefaultlocale() locale.setlocale(locale.LC_ALL, '') except locale.Error: - e = get_exception() # fallback to the 'C' locale, which may cause unicode # issues but is preferable to simply failing because # of an unknown locale @@ -1757,25 +1755,29 @@ class AnsibleModule(object): # rename might not preserve context self.set_context_if_different(dest, context, False) - def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None): + def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None): ''' Execute a command, returns rc, stdout, and stderr. - args is the command to run - If args is a list, the command will be run with shell=False. - If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False - If args is a string and use_unsafe_shell=True it run with shell=True. - Other arguments: - - check_rc (boolean) Whether to call fail_json in case of - non zero RC. Default is False. - - close_fds (boolean) See documentation for subprocess.Popen(). - Default is True. - - executable (string) See documentation for subprocess.Popen(). - Default is None. - - prompt_regex (string) A regex string (not a compiled regex) which - can be used to detect prompts in the stdout - which would otherwise cause the execution - to hang (especially if no input data is - specified) + + :arg args: is the command to run + * If args is a list, the command will be run with shell=False. + * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False + * If args is a string and use_unsafe_shell=True it runs with shell=True. + :kw check_rc: Whether to call fail_json in case of non zero RC. + Default False + :kw close_fds: See documentation for subprocess.Popen(). Default True + :kw executable: See documentation for subprocess.Popen(). Default None + :kw data: If given, information to write to the stdin of the command + :kw binary_data: If False, append a newline to the data. Default False + :kw path_prefix: If given, additional path to find the command in. + This adds to the PATH environment vairable so helper commands in + the same directory can also be found + :kw cwd: iIf given, working directory to run the command inside + :kw use_unsafe_shell: See `args` parameter. 
Default False + :kw prompt_regex: Regex string (not a compiled regex) which can be + used to detect prompts in the stdout which would otherwise cause + the execution to hang (especially if no input data is specified) + :kwarg environ_update: dictionary to *update* os.environ with ''' shell = False @@ -1806,10 +1808,15 @@ class AnsibleModule(object): msg = None st_in = None - # Set a temporary env path if a prefix is passed - env=os.environ + # Manipulate the environ we'll send to the new process + old_env_vals = {} + if environ_update: + for key, val in environ_update.items(): + old_env_vals[key] = os.environ.get(key, None) + os.environ[key] = val if path_prefix: - env['PATH']="%s:%s" % (path_prefix, env['PATH']) + old_env_vals['PATH'] = os.environ['PATH'] + os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH']) # create a printable version of the command for use # in reporting later, which strips out things like @@ -1851,11 +1858,10 @@ class AnsibleModule(object): close_fds=close_fds, stdin=st_in, stdout=subprocess.PIPE, - stderr=subprocess.PIPE + stderr=subprocess.PIPE, + env=os.environ, ) - if path_prefix: - kwargs['env'] = env if cwd and os.path.isdir(cwd): kwargs['cwd'] = cwd @@ -1934,6 +1940,13 @@ class AnsibleModule(object): except: self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args) + # Restore env settings + for key, val in old_env_vals.items(): + if val is None: + del os.environ[key] + else: + os.environ[key] = val + if rc != 0 and check_rc: msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values) self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg) diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py index 09ab14b6d29..0db6fbe7b94 100644 --- a/test/units/module_utils/basic/test_run_command.py +++ b/test/units/module_utils/basic/test_run_command.py @@ -39,6 +39,7 @@ class OpenStringIO(StringIO): def close(self): pass + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleRunCommand(unittest.TestCase): @@ -111,10 +112,6 @@ class TestAnsibleModuleRunCommand(unittest.TestCase): self.assertEqual(args, ('ls a " b" "c "', )) self.assertEqual(kwargs['shell'], True) - def test_path_prefix(self): - self.module.run_command('foo', path_prefix='/opt/bin') - self.assertEqual('/opt/bin', self.os.environ['PATH'].split(':')[0]) - def test_cwd(self): self.os.getcwd.return_value = '/old' self.module.run_command('/bin/ls', cwd='/new') From a1063827867cada5414577a85e2f13121cb7b948 Mon Sep 17 00:00:00 2001 From: Matthew Jones <mat@matburt.net> Date: Wed, 20 Jan 2016 15:03:56 -0500 Subject: [PATCH 0431/1113] Add a config option for rackspace inventory cache Adding a config and environment variable option for tuning the cache age check in the rackspace inventory module --- contrib/inventory/rax.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py index 0028f54d201..4ac6b0f47e9 100755 --- a/contrib/inventory/rax.py +++ b/contrib/inventory/rax.py @@ -355,9 +355,12 @@ def get_cache_file_path(regions): def _list(regions, refresh_cache=True): + cache_max_age = int(get_config(p, 'rax', 'cache_max_age', + 'RAX_CACHE_MAX_AGE', 600)) + if (not os.path.exists(get_cache_file_path(regions)) or refresh_cache or - (time() - os.stat(get_cache_file_path(regions))[-1]) > 600): + (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): # Cache file doesn't 
exist or older than 10m or refresh cache requested _list_into_cache(regions) From 3201f5d90e311735b54af21fc621a9197e1eb788 Mon Sep 17 00:00:00 2001 From: Selivanov Pavel <selivan2@gmail.com> Date: Wed, 20 Jan 2016 23:11:44 +0300 Subject: [PATCH 0432/1113] plugins/strategy: added significant details to parser error message. See discussion at https://github.com/ansible/ansible/issues/13753 --- lib/ansible/plugins/strategy/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 0b7f48b5631..3013eac3d70 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -492,7 +492,7 @@ class StrategyBase: tags = [ tags ] if len(tags) > 0: if len(b._task_include.tags) > 0: - raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)", + raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement", obj=included_file._task._ds) display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option") b._task_include.tags = tags From 61009604e33c0e20125d01fc5ed176fec43cdfd5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 20 Jan 2016 12:18:52 -0800 Subject: [PATCH 0433/1113] Update submodules to bring in yum fix --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index ffea58ee86d..d7fac82f97c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit ffea58ee86dbee20dc272c74cd5f8e02f6f317e6 +Subproject commit d7fac82f97c153af08dbea2b2ae9718b19abeb8a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e9450df8786..f798240f436 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e9450df878632531fae574b5eaf28bf0f7916948 +Subproject commit f798240f436a16a828f48759bbd176b6bccdfe75 From a1318e16641a89cfbd41d072670d374bbd0b3cf7 Mon Sep 17 00:00:00 2001 From: Matthew Jones <mat@matburt.net> Date: Wed, 20 Jan 2016 15:27:06 -0500 Subject: [PATCH 0434/1113] Add rax cache age ini documentation --- contrib/inventory/rax.ini | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/contrib/inventory/rax.ini b/contrib/inventory/rax.ini index 5a269e16a3a..15948e7b2e6 100644 --- a/contrib/inventory/rax.ini +++ b/contrib/inventory/rax.ini @@ -55,3 +55,12 @@ # will be ignored, and 4 will be used. Accepts a comma separated list, # the first found wins. # access_ip_version = 4 + +# Environment Variable: RAX_CACHE_MAX_AGE +# Default: 600 +# +# A configuration the changes the behavior or the inventory cache. +# Inventory listing performed before this value will be returned from +# the cache instead of making a full request for all inventory. Setting +# this value to 0 will force a full request. 
+# cache_max_age = 600 \ No newline at end of file From 54cde0d08244fe6eb0fb53fc9f3174eee6f660aa Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 20 Jan 2016 15:26:45 -0500 Subject: [PATCH 0435/1113] Standardize removal of BECOME-SUCCESS method and use it for async too Fixes #13965 Fixes #13971 --- lib/ansible/plugins/action/__init__.py | 9 +++++++++ lib/ansible/plugins/action/async.py | 4 ++++ lib/ansible/plugins/action/raw.py | 5 +---- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index ad305121500..62a2e7806f0 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -24,6 +24,7 @@ import json import os import pipes import random +import re import stat import tempfile import time @@ -356,6 +357,14 @@ class ActionBase(with_metaclass(ABCMeta, object)): return data[idx:] + def _strip_success_message(self, data): + ''' + Removes the BECOME-SUCCESS message from the data. + ''' + if data.strip().startswith('BECOME-SUCCESS-'): + data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data) + return data + def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True): ''' Transfer and run a module along with its arguments. diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 8a7175aeb86..5e04f37ff12 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -75,4 +75,8 @@ class ActionModule(ActionBase): result['changed'] = True + # be sure to strip out the BECOME-SUCCESS message, which may + # be there depending on the output of the module + result['stdout'] = self._strip_success_message(result.get('stdout', '')) + return result diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py index d6fa2f35599..c9718db4135 100644 --- a/lib/ansible/plugins/action/raw.py +++ b/lib/ansible/plugins/action/raw.py @@ -19,8 +19,6 @@ __metaclass__ = type from ansible.plugins.action import ActionBase -import re - class ActionModule(ActionBase): TRANSFERS_FILES = False @@ -42,7 +40,6 @@ class ActionModule(ActionBase): # for some modules (script, raw), the sudo success key # may leak into the stdout due to the way the sudo/su # command is constructed, so we filter that out here - if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'): - result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout']) + result['stdout'] = self._strip_success_message(result.get('stdout', '')) return result From c2ac1507ea7b34ed7ce7ea957e3c8c8e6377625a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 20 Jan 2016 18:31:40 -0500 Subject: [PATCH 0436/1113] corrected host/group match in inventory_hostnames now the lookup works when using ! 
and & operators fixes #13997 --- lib/ansible/plugins/lookup/inventory_hostnames.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py index a86d2270bba..651055b6f74 100644 --- a/lib/ansible/plugins/lookup/inventory_hostnames.py +++ b/lib/ansible/plugins/lookup/inventory_hostnames.py @@ -26,10 +26,15 @@ class LookupModule(LookupBase): def get_hosts(self, variables, pattern): hosts = [] - if pattern in variables['groups']: - hosts = variables['groups'][pattern] - elif pattern in variables['groups']['all']: - hosts = [pattern] + if pattern[0] in ('!','&'): + obj = pattern[1:] + else: + obj = pattern + + if obj in variables['groups']: + hosts = variables['groups'][obj] + elif obj in variables['groups']['all']: + hosts = [obj] return hosts def run(self, terms, variables=None, **kwargs): From 365c5b23ce8dad5fcbaadcb9b86b5f7da4874437 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 20 Jan 2016 18:23:30 -0500 Subject: [PATCH 0437/1113] Re-add cache clearing call to Inventory init This prevents a bug where the existing cache outside of the class is not cleared when creating a new Inventory object. This only really affects people using the API directly right now, but wanted to fix it to prevent weird errors from popping up. --- lib/ansible/inventory/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index eb8d1905502..3d9ad3516d9 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -78,6 +78,10 @@ class Inventory(object): self._restriction = None self._subset = None + # clear the cache here, which is only useful if more than + # one Inventory objects are created when using the API directly + self.clear_pattern_cache() + self.parse_inventory(host_list) def serialize(self): From 627dec716b1aee04676e2b231609ff0890cd966e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 20 Jan 2016 20:53:31 -0500 Subject: [PATCH 0438/1113] Template the run_once value in the linear strategy as we use it there This is pre-post_validation, so we have to template it on the fly as we use it to determine if we bypass the host loop. 
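A minimal, hypothetical task showing the usage this fixes: a run_once value given as a template expression is now resolved before the strategy decides whether to bypass the host loop:

    - name: run a one-time step on a single host
      command: /usr/local/bin/migrate                         # placeholder command
      run_once: "{{ migrate_single_host | default(true) }}"   # templated value now honoured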
Fixes #11876 --- lib/ansible/plugins/strategy/linear.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 61b0c015035..7750f50ff05 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -194,8 +194,6 @@ class StrategyModule(StrategyBase): try: action = action_loader.get(task.action, class_only=True) - if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False): - run_once = True except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin @@ -227,6 +225,8 @@ class StrategyModule(StrategyBase): templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") + run_once = templar.template(task.run_once) + if not callback_sent: display.debug("sending task start callback, copying the task so we can template it temporarily") saved_name = task.name @@ -249,7 +249,7 @@ class StrategyModule(StrategyBase): self._queue_task(host, task, task_vars, play_context) # if we're bypassing the host loop, break out now - if run_once: + if run_once or getattr(action, 'BYPASS_HOST_LOOP', False): break results += self._process_pending_results(iterator, one_pass=True) From 1325c21ca079d7214b4d549736034bc5e3bd35fe Mon Sep 17 00:00:00 2001 From: Sergii Korochkin <sergii.korochkin@pb.com> Date: Thu, 21 Jan 2016 14:00:29 +0200 Subject: [PATCH 0439/1113] Adding support for scm-based role source urls (incl. integration test to cover it) --- lib/ansible/cli/galaxy.py | 4 ++-- test/integration/Makefile | 12 ++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 476a7d0f897..08488154e25 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -385,7 +385,8 @@ class GalaxyCLI(CLI): # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). for rname in self.args: - roles_left.append(GalaxyRole(self.galaxy, rname.strip())) + role = RoleRequirement.role_yaml_parse(rname.strip()) + roles_left.append(GalaxyRole(self.galaxy, **role)) for role in roles_left: display.vvv('Installing role %s ' % role.name) @@ -681,4 +682,3 @@ class GalaxyCLI(CLI): display.display(resp['status']) return True - diff --git a/test/integration/Makefile b/test/integration/Makefile index dcd30f0b836..95b9d4320c4 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -168,7 +168,7 @@ else @echo "Consul agent is not running locally. To run a cluster locally see http://github.com/sgargan/consul-vagrant" endif -test_galaxy: test_galaxy_spec test_galaxy_yaml +test_galaxy: test_galaxy_spec test_galaxy_yaml test_galaxy_git test_galaxy_spec: mytmpdir=$(MYTMPDIR) ; \ @@ -188,10 +188,18 @@ test_galaxy_yaml: rm -rf $$mytmpdir ; \ exit $$RC +test_galaxy_git: + mytmpdir=$(MYTMPDIR) ; \ + ansible-galaxy install git+https://bitbucket.org/willthames/git-ansible-galaxy,v1.6 -p $$mytmpdir/roles -vvvv; \ + cp galaxy_playbook_git.yml $$mytmpdir ; \ + ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook_git.yml -v $(TEST_FLAGS) ; \ + RC=$$? 
; \ + rm -rf $$mytmpdir ; \ + exit $$RC + test_lookup_paths: ansible-playbook lookup_paths/play.yml -i $(INVENTORY) -v $(TEST_FLAGS) no_log: # This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] - From 13e8732598404d59d4044dd5a6781b7447fae49b Mon Sep 17 00:00:00 2001 From: Sergii Korochkin <sergii.korochkin@pb.com> Date: Thu, 21 Jan 2016 14:08:26 +0200 Subject: [PATCH 0440/1113] [hotfix] add missed playbook file --- test/integration/galaxy_playbook_git.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 test/integration/galaxy_playbook_git.yml diff --git a/test/integration/galaxy_playbook_git.yml b/test/integration/galaxy_playbook_git.yml new file mode 100644 index 00000000000..1d9b03b22a2 --- /dev/null +++ b/test/integration/galaxy_playbook_git.yml @@ -0,0 +1,5 @@ +- hosts: localhost + connection: local + + roles: + - "git-ansible-galaxy" From f26adcc7da7f8e6605167203249648f7b0e74fb7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 21 Jan 2016 10:53:02 -0500 Subject: [PATCH 0441/1113] avoid shredding empty files, also x/0 also cleaned up unused import and exception var --- lib/ansible/parsing/vault/__init__.py | 32 ++++++++++++++------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index f38525e028d..dc30dd0ffbd 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -71,7 +71,7 @@ try: except ImportError: pass -from ansible.compat.six import PY3, byte2int +from ansible.compat.six import PY3 from ansible.utils.unicode import to_unicode, to_bytes HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC @@ -236,22 +236,24 @@ class VaultEditor: """ file_len = os.path.getsize(tmp_path) - max_chunk_len = min(1024*1024*2, file_len) - passes = 3 - with open(tmp_path, "wb") as fh: - for _ in range(passes): - fh.seek(0, 0) - # get a random chunk of data, each pass with other length - chunk_len = random.randint(max_chunk_len//2, max_chunk_len) - data = os.urandom(chunk_len) + if file_len > 0: # avoid work when file was empty + max_chunk_len = min(1024*1024*2, file_len) - for _ in range(0, file_len // chunk_len): - fh.write(data) - fh.write(data[:file_len % chunk_len]) + passes = 3 + with open(tmp_path, "wb") as fh: + for _ in range(passes): + fh.seek(0, 0) + # get a random chunk of data, each pass with other length + chunk_len = random.randint(max_chunk_len//2, max_chunk_len) + data = os.urandom(chunk_len) - assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy - os.fsync(fh) + for _ in range(0, file_len // chunk_len): + fh.write(data) + fh.write(data[:file_len % chunk_len]) + + assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy + os.fsync(fh) def _shred_file(self, tmp_path): @@ -273,7 +275,7 @@ class VaultEditor: try: r = call(['shred', tmp_path]) - except OSError as e: + except OSError: # shred is not available on this system, or some other error occured. 
r = 1 From d6ae9e2c291cdc8f60f066c7339ef28731e96d4e Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Tue, 19 Jan 2016 17:56:07 +0100 Subject: [PATCH 0442/1113] Avoid recursively checking JSON inventory for Unicode by moving to en-bloc unicode conversion to act on scripts stdout Both python-json and simplejson always return unicode strings when using their loads() method on unicode strings. This is true at least since 2009. This makes checking each substring unnecessary, because we do not need to recursively check the strings contained in the inventory dict later one-by-one This commit makes parsing of large dynamic inventory at least 2 seconds faster. cf: https://github.com/towolf/ansible-large-inventory-testcase --- lib/ansible/inventory/script.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 042fa8c24a2..999e472f539 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -31,7 +31,7 @@ from ansible.errors import AnsibleError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode -from ansible.utils.unicode import to_str +from ansible.utils.unicode import to_str, to_unicode class InventoryScript: @@ -58,7 +58,13 @@ class InventoryScript: if sp.returncode != 0: raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) - self.data = stdout + # make sure script output is unicode so that json loader will output + # unicode strings itself + try: + self.data = to_unicode(stdout, errors="strict") + except Exception as e: + raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_str(self.filename), to_str(e))) + # see comment about _meta below self.host_vars_from_top = None self._parse(stderr) @@ -78,8 +84,6 @@ class InventoryScript: sys.stderr.write(err + "\n") raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename))) - self.raw = json_dict_bytes_to_unicode(self.raw) - group = None for (group_name, data) in self.raw.items(): From 981e9c44ba7b18b87eb7c4d21ef0235b5258e42e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg.dekoenigsberg@gmail.com> Date: Thu, 21 Jan 2016 12:22:26 -0500 Subject: [PATCH 0443/1113] Put Python 2.4 in checklist It's in the travis check, but we don't spell it out explicitly in the guidelines. --- docsite/rst/developing_modules.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index f945cc24bca..4be3415d349 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -479,6 +479,7 @@ Module checklist ```````````````` * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work +* Modules must be written to support Python 2.4. * Documentation: Make sure it exists * `required` should always be present, be it true or false * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. 
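The most common Python 2.4 pitfall is exception handling: the "except SomeError as e" form only exists from Python 2.6 onward, so 2.4-compatible module code typically retrieves the exception through a helper such as the get_exception() seen elsewhere in this series. A hedged sketch (names other than get_exception and fail_json are placeholders):

    # Python 2.4-compatible error handling inside a module
    try:
        result = do_something_risky()      # placeholder call
    except IOError:
        e = get_exception()                # helper provided by module_utils in real module code
        module.fail_json(msg='operation failed: %s' % e)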
From f3336fba448bdc37743982b0febce5c210b9249a Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg.dekoenigsberg@gmail.com> Date: Thu, 21 Jan 2016 13:02:03 -0500 Subject: [PATCH 0444/1113] Tweaking Python 2.4 requirement --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 4be3415d349..dc5b7e8f5ff 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -479,7 +479,7 @@ Module checklist ```````````````` * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work -* Modules must be written to support Python 2.4. +* Modules must be written to support Python 2.4. If this is not possible, required minimum python version and rationale should be explained in the requirements section in DOCUMENTATION. * Documentation: Make sure it exists * `required` should always be present, be it true or false * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. From ecf867af6fbc213b67c9311030444b2308025cca Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 21 Jan 2016 11:11:05 -0800 Subject: [PATCH 0445/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index d7fac82f97c..09e2457eb0e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit d7fac82f97c153af08dbea2b2ae9718b19abeb8a +Subproject commit 09e2457eb0e811ac293065dd77cd31597ceb2da7 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f798240f436..e8427cb32a0 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f798240f436a16a828f48759bbd176b6bccdfe75 +Subproject commit e8427cb32a07ebaa4682192675a075fc336f6564 From d02dee37a10026ab9e7d636087142d6031265bd0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 21 Jan 2016 15:02:04 -0500 Subject: [PATCH 0446/1113] Disallow setting state on template tasks Fixes #14056 --- lib/ansible/plugins/action/template.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index d134f80a8df..d8339e57b90 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -63,8 +63,13 @@ class ActionModule(ActionBase): dest = self._task.args.get('dest', None) faf = self._task.first_available_file force = boolean(self._task.args.get('force', True)) + state = self._task.args.get('state', None) - if (source is None and faf is not None) or dest is None: + if state is not None: + result['failed'] = True + result['msg'] = "'state' cannot be specified on a template" + return result + elif (source is None and faf is not None) or dest is None: result['failed'] = True result['msg'] = "src and dest are required" return result From 465115594215d68a3a1090f0e8b3eeeecadba2aa Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 21 Jan 2016 16:41:05 -0500 Subject: [PATCH 0447/1113] Fix bug with any_errors_fatal where task was not available --- lib/ansible/plugins/strategy/linear.py | 8 +++++++- 1 file 
changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 7750f50ff05..804cfadc776 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -177,6 +177,9 @@ class StrategyModule(StrategyBase): skip_rest = False choose_step = True + # flag set if task is set to any_errors_fatal + any_errors_fatal = False + results = [] for (host, task) in host_tasks: if not task: @@ -188,6 +191,9 @@ class StrategyModule(StrategyBase): run_once = False work_to_do = True + if task.any_errors_fatal: + any_errors_fatal = True + # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. @@ -348,7 +354,7 @@ class StrategyModule(StrategyBase): failed_hosts.append(res._host.name) # if any_errors_fatal and we had an error, mark all hosts as failed - if task and task.any_errors_fatal and len(failed_hosts) > 0: + if any_errors_fatal and len(failed_hosts) > 0: for host in hosts_left: # don't double-mark hosts, or the iterator will potentially # fail them out of the rescue/always states From 3b71710827bc3fbf620174e81d5c16a6389fe9b5 Mon Sep 17 00:00:00 2001 From: Emilien Kenler <ekenler@wizcorp.jp> Date: Mon, 3 Aug 2015 17:12:47 +0900 Subject: [PATCH 0448/1113] ansible.utils._git_repo_info() now supports branch names with slashes --- lib/ansible/cli/__init__.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 12ba8f89004..f3e6df0ed7f 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -393,16 +393,20 @@ class CLI(object): except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) - branch = f.readline().split('/')[-1].rstrip("\n") + line = f.readline().rstrip("\n") + if line.startswith("ref:"): + branch_path = os.path.join(repo_path, line[5:]) + else: + branch_path = None f.close() - branch_path = os.path.join(repo_path, "refs", "heads", branch) - if os.path.exists(branch_path): + if branch_path and os.path.exists(branch_path): + branch = '/'.join(line.split('/')[2:]) f = open(branch_path) commit = f.readline()[:10] f.close() else: # detached HEAD - commit = branch[:10] + commit = line[:10] branch = 'detached HEAD' branch_path = os.path.join(repo_path, "HEAD") From 46bd5a0d9c77f4fa82b84ab3f1417cb8acb01703 Mon Sep 17 00:00:00 2001 From: Ilya Novickov <il.novikov@gmail.com> Date: Fri, 22 Jan 2016 18:50:41 +0800 Subject: [PATCH 0449/1113] Fix server numbers in first 10 next 10 example --- docsite/rst/playbooks_best_practices.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index ecca4d75cb5..653d7f8a196 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -254,8 +254,8 @@ What about just my webservers in Boston?:: What about just the first 10, and then the next 10?:: - ansible-playbook -i production webservers.yml --limit boston[0-10] - ansible-playbook -i production webservers.yml --limit boston[10-20] + ansible-playbook -i production webservers.yml --limit boston[1-10] + ansible-playbook -i production webservers.yml --limit boston[11-20] And of course just basic ad-hoc stuff is also possible.:: From fb797a9e7786a3fde70e7de34ccf9ac3946501b3 Mon Sep 17 
00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 22 Jan 2016 12:44:56 -0500 Subject: [PATCH 0450/1113] Fixing role dependency chain creation The dep chain for roles created during the compile step had bugs, in which the dep chain was overwriten and the original tasks in the role were not assigned a dep chain. This lead to problems in determining whether roles had already run when in a "diamond" structure, and in some cases roles were not correctly getting variables from parents. Fixes #14046 --- lib/ansible/executor/play_iterator.py | 13 ++++++++++++- lib/ansible/playbook/role/__init__.py | 16 +++++++++------- .../roles/test_var_precedence_dep/tasks/main.yml | 2 +- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 584cfb0fe67..09caeec2d98 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -49,6 +49,7 @@ class HostState: self.cur_rescue_task = 0 self.cur_always_task = 0 self.cur_role = None + self.cur_dep_chain = None self.run_state = PlayIterator.ITERATING_SETUP self.fail_state = PlayIterator.FAILED_NONE self.pending_setup = False @@ -102,6 +103,8 @@ class HostState: new_state.run_state = self.run_state new_state.fail_state = self.fail_state new_state.pending_setup = self.pending_setup + if self.cur_dep_chain is not None: + new_state.cur_dep_chain = self.cur_dep_chain[:] if self.tasks_child_state is not None: new_state.tasks_child_state = self.tasks_child_state.copy() if self.rescue_child_state is not None: @@ -212,13 +215,21 @@ class PlayIterator: s.pending_setup = False if not task: + old_s = s (s, task) = self._get_next_task_from_state(s, peek=peek) + def _roles_are_different(ra, rb): + if ra != rb: + return True + else: + return old_s.cur_dep_chain != task._block._dep_chain + if task and task._role: # if we had a current role, mark that role as completed - if s.cur_role and task._role != s.cur_role and host.name in s.cur_role._had_task_run and not peek: + if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek: s.cur_role._completed[host.name] = True s.cur_role = task._role + s.cur_dep_chain = task._block._dep_chain if not peek: self._host_states[host.name] = s diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 10e14b4ac38..f192ea6c945 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -323,7 +323,7 @@ class Role(Base, Become, Conditional, Taggable): return host.name in self._completed and not self._metadata.allow_duplicates - def compile(self, play, dep_chain=[]): + def compile(self, play, dep_chain=None): ''' Returns the task list for this role, which is created by first recursively compiling the tasks for all direct dependencies, and @@ -337,18 +337,20 @@ class Role(Base, Become, Conditional, Taggable): block_list = [] # update the dependency chain here + if dep_chain is None: + dep_chain = [] new_dep_chain = dep_chain + [self] deps = self.get_direct_dependencies() for dep in deps: dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain) - for dep_block in dep_blocks: - new_dep_block = dep_block.copy() - new_dep_block._dep_chain = new_dep_chain - new_dep_block._play = play - block_list.append(new_dep_block) + block_list.extend(dep_blocks) - block_list.extend(self._task_blocks) + for task_block in self._task_blocks: + new_task_block = task_block.copy() + 
new_task_block._dep_chain = new_dep_chain + new_task_block._play = play + block_list.append(new_task_block) return block_list diff --git a/test/integration/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/roles/test_var_precedence_dep/tasks/main.yml index b50f9dfc271..2f8e17096bc 100644 --- a/test/integration/roles/test_var_precedence_dep/tasks/main.yml +++ b/test/integration/roles/test_var_precedence_dep/tasks/main.yml @@ -7,7 +7,7 @@ - assert: that: - 'extra_var == "extra_var"' - - 'param_var == "param_var"' + - 'param_var == "param_var_role1"' - 'vars_var == "vars_var"' - 'vars_files_var == "vars_files_var"' - 'vars_files_var_role == "vars_files_var_dep"' From 46ede563dd4b02bd6652745581a80d6f982e7195 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 23 Jan 2016 09:25:50 -0500 Subject: [PATCH 0451/1113] Use templated handler name during callback Fixes #14082 --- lib/ansible/plugins/strategy/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 3013eac3d70..27e7b2f9741 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -545,7 +545,10 @@ class StrategyBase: # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') # result = False # break + saved_name = handler.name + handler.name = handler_name self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) + handler.name = saved_name if notified_hosts is None: notified_hosts = self._notified_handlers[handler_name] From f82d9facc2f23ad1bd7e16d8e36ac50587b0df2c Mon Sep 17 00:00:00 2001 From: Graham Ullrich <graham@flyingcracker.com> Date: Sat, 23 Jan 2016 12:35:59 -0700 Subject: [PATCH 0452/1113] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7cab6322a4..4037ea7f9f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ Ansible Changes By Release This re-executes inventory scripts, but does not force them to ignore any cache they might use. * New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. * local connections now work with 'su' as a privilege escalation method +* Ansible 2.0 has deprecated the “ssh” from ansible_ssh_user, ansible_ssh_host, and ansible_ssh_port to become ansible_user, ansible_host, and ansible_port. * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. `ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). From e5493fa6315008bf8b92b2f248e2583b6d074687 Mon Sep 17 00:00:00 2001 From: Matt Harris <matthaeus.harris@gmail.com> Date: Sun, 24 Jan 2016 00:57:04 -0800 Subject: [PATCH 0453/1113] Added support for proxmox 4.x --- contrib/inventory/proxmox.py | 72 ++++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py index ab65c342e4e..0d0ee86a4ef 100755 --- a/contrib/inventory/proxmox.py +++ b/contrib/inventory/proxmox.py @@ -15,6 +15,16 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# Updated 2016 by Matt Harris <matthaeus.harris@gmail.com> +# +# Added support for Proxmox VE 4.x +# Added support for using the Notes field of a VM to define groups and variables: +# A well-formatted JSON object in the Notes field will be added to the _meta +# section for that VM. In addition, the "groups" key of this JSON object may be +# used to specify group membership: +# +# { "groups": ["utility", "databases"], "a": false, "b": true } + import urllib try: import json @@ -32,29 +42,29 @@ class ProxmoxNodeList(list): def get_names(self): return [node['node'] for node in self] -class ProxmoxQemu(dict): +class ProxmoxVM(dict): def get_variables(self): variables = {} for key, value in iteritems(self): variables['proxmox_' + key] = value return variables -class ProxmoxQemuList(list): +class ProxmoxVMList(list): def __init__(self, data=[]): for item in data: - self.append(ProxmoxQemu(item)) + self.append(ProxmoxVM(item)) def get_names(self): - return [qemu['name'] for qemu in self if qemu['template'] != 1] + return [vm['name'] for vm in self if vm['template'] != 1] def get_by_name(self, name): - results = [qemu for qemu in self if qemu['name'] == name] + results = [vm for vm in self if vm['name'] == name] return results[0] if len(results) > 0 else None def get_variables(self): variables = {} - for qemu in self: - variables[qemu['name']] = qemu.get_variables() + for vm in self: + variables[vm['name']] = vm.get_variables() return variables @@ -105,8 +115,23 @@ class ProxmoxAPI(object): def nodes(self): return ProxmoxNodeList(self.get('api2/json/nodes')) + def vms_by_type(self, node, type): + return ProxmoxVMList(self.get('api2/json/nodes/{}/{}'.format(node, type))) + + def vm_description_by_type(self, node, vm, type): + return self.get('api2/json/nodes/{}/{}/{}/config'.format(node, type, vm)) + def node_qemu(self, node): - return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node))) + return self.vms_by_type(node, 'qemu') + + def node_qemu_description(self, node, vm): + return self.vm_description_by_type(node, vm, 'qemu') + + def node_lxc(self, node): + return self.vms_by_type(node, 'lxc') + + def node_lxc_description(self, node, vm): + return self.vm_description_by_type(node, vm, 'lxc') def pools(self): return ProxmoxPoolList(self.get('api2/json/pools')) @@ -131,6 +156,37 @@ def main_list(options): qemu_list = proxmox_api.node_qemu(node) results['all']['hosts'] += qemu_list.get_names() results['_meta']['hostvars'].update(qemu_list.get_variables()) + lxc_list = proxmox_api.node_lxc(node) + results['all']['hosts'] += lxc_list.get_names() + results['_meta']['hostvars'].update(lxc_list.get_variables()) + + for vm in results['_meta']['hostvars']: + vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] + try: + type = results['_meta']['hostvars'][vm]['proxmox_type'] + except KeyError: + type = 'qemu' + try: + description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] + except KeyError: + description = None + + try: + metadata = json.loads(description) + except TypeError: + metadata = {} + # print metadata + + if 'groups' in metadata: + # print metadata + for group in metadata['groups']: + if group not in results: + results[group] = { + 'hosts': [] + } + results[group]['hosts'] += [vm] + + results['_meta']['hostvars'][vm].update(metadata) # pools for pool in proxmox_api.pools().get_names(): From f8e73714f6dbed49a6086b89075acf54d2a3cbba Mon Sep 17 00:00:00 2001 From: Matt Harris <matthaeus.harris@gmail.com> Date: Sun, 24 Jan 2016 01:10:47 -0800 Subject: [PATCH 
0454/1113] If the notes don't contain JSON, add the string to the notes key of _meta --- contrib/inventory/proxmox.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py index 0d0ee86a4ef..c0ffb0b16c6 100755 --- a/contrib/inventory/proxmox.py +++ b/contrib/inventory/proxmox.py @@ -175,7 +175,10 @@ def main_list(options): metadata = json.loads(description) except TypeError: metadata = {} - # print metadata + except ValueError: + metadata = { + 'notes': description + } if 'groups' in metadata: # print metadata From 2b02e8e2b43a7b5e10e0c12b0bbecedfb00f50c0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 24 Jan 2016 23:02:55 -0500 Subject: [PATCH 0455/1113] Allow certain base attributes to be used as role params Role definitions typically require params to be different from those which are specified as FieldAttributes on the playbook classes used for roles, however a certain subset should be allowed (typically those used for connection stuff). Fixes #14095 --- lib/ansible/playbook/role/definition.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index 0af49cec91c..ac7f40050c4 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -188,7 +188,12 @@ class RoleDefinition(Base, Become, Conditional, Taggable): for (key, value) in iteritems(ds): # use the list of FieldAttribute values to determine what is and is not # an extra parameter for this role (or sub-class of this role) - if key not in base_attribute_names: + # FIXME: hard-coded list of exception key names here corresponds to the + # connection fields in the Base class. There may need to be some + # other mechanism where we exclude certain kinds of field attributes, + # or make this list more automatic in some way so we don't have to + # remember to update it manually. 
+ if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'): # this key does not match a field attribute, so it must be a role param role_params[key] = value else: From 6cc7f7890f9bb7814ba111dfd6c771b5edb16726 Mon Sep 17 00:00:00 2001 From: Robin Naundorf <r.naundorf@fh-muenster.de> Date: Mon, 25 Jan 2016 11:51:30 +0100 Subject: [PATCH 0456/1113] Fixes documentation formatting Fixes documentation formatting --- docsite/rst/intro_dynamic_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index bbaf1a0fff4..71f64081763 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -258,7 +258,7 @@ Download the latest version of the OpenStack dynamic inventory script, make it e chmod +x openstack.py sudo cp openstack.py /etc/ansible/hosts -Download the sample configuration file, modify it to suit your needs and copy it to /etc/ansible/openstack.yml +Download the sample configuration file, modify it to suit your needs and copy it to `/etc/ansible/openstack.yml`:: wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.yml vi openstack.yml From a8ffa021344b5548813790f18dda68d32717a4d7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 25 Jan 2016 09:02:44 -0500 Subject: [PATCH 0457/1113] Make sure blocks use their parents dependency chains Fixes a bug inroduced in fb797a9 where included tasks in a role were not being executed because the child blocks had no dep chain set. --- lib/ansible/playbook/block.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 0cf9711403f..b31ffbcfe8b 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -48,13 +48,17 @@ class Block(Base, Become, Conditional, Taggable): self._parent_block = None self._use_handlers = use_handlers self._implicit = implicit - self._dep_chain = [] if task_include: self._task_include = task_include elif parent_block: self._parent_block = parent_block + if parent_block: + self._dep_chain = parent_block._dep_chain[:] + else: + self._dep_chain = [] + super(Block, self).__init__() def get_vars(self): @@ -374,3 +378,4 @@ class Block(Base, Become, Conditional, Taggable): def has_tasks(self): return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0 + From 043b2cbcb4b501cb74f2d30f8986c608e71e03b2 Mon Sep 17 00:00:00 2001 From: Matthew Seaman <m.seaman@infracaninophile.co.uk> Date: Mon, 25 Jan 2016 16:20:17 +0000 Subject: [PATCH 0458/1113] Change example to use pkg rather than the obsolete pkg_add pkg_add has been obsoleted in all released versions of FreeBSD for several years. Change the example to use the appropriate pkg(8) command line. --- docsite/rst/intro_bsd.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_bsd.rst b/docsite/rst/intro_bsd.rst index 17c1b8d1516..ba0e07f2c86 100644 --- a/docsite/rst/intro_bsd.rst +++ b/docsite/rst/intro_bsd.rst @@ -30,7 +30,7 @@ Bootstrapping BSD For Ansible to effectively manage your machine, we need to install Python along with a json library, in this case we are using Python 2.7 which already has json included. 
On your control machine you can simply execute the following for most versions of FreeBSD:: - ansible -m raw -a “pkg_add -r python27” mybsdhost1 + ansible -m raw -a “pkg install -y python27” mybsdhost1 Once this is done you can now use other Ansible modules aside from the ``raw`` module. From 6e716b177e280c9aa9cba9f7f6b564913c6ea7ed Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 10:33:50 -0500 Subject: [PATCH 0459/1113] add webdocs as alias --- docsite/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/Makefile b/docsite/Makefile index 15347f84bf9..2b87827c597 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -20,6 +20,8 @@ viewdocs: clean staticmin htmldocs: staticmin ./build-site.py rst +webdocs: htmldocs + clean: -rm -rf htmlout -rm -f .buildinfo From af88e348315aaf6f0ac3870106405d32cb693213 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 10:49:54 -0500 Subject: [PATCH 0460/1113] better error when host inventory script isnt dict --- lib/ansible/inventory/script.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 999e472f539..95e48eff58f 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -149,7 +149,10 @@ class InventoryScript: def get_host_variables(self, host): """ Runs <script> --host <hostname> to determine additional host variables """ if self.host_vars_from_top is not None: - got = self.host_vars_from_top.get(host.name, {}) + try: + got = self.host_vars_from_top.get(host.name, {}) + except AttributeError as e: + raise AnsibleError("Improperly formated host information for %s: %s" % (host.name,to_str(e))) return got cmd = [self.filename, "--host", host.name] From 87fe32319ffb86ec7ea13f23ef9d7a37d6844469 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 17:11:36 -0500 Subject: [PATCH 0461/1113] ensure stdout callback alwasy is loaded first it is now called for every event prior to any other callbacks fixes #14114 --- lib/ansible/executor/task_queue_manager.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 13840de5964..b5260c1f41a 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -144,11 +144,13 @@ class TaskQueueManager: self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK if isinstance(self._stdout_callback, CallbackBase): - self._callback_plugins.append(self._stdout_callback) stdout_callback_loaded = True elif isinstance(self._stdout_callback, basestring): if self._stdout_callback not in callback_loader: raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + else: + self._stdout_callback = callback_loader.get(self._stdout_callback) + stdout_callback_loaded = True else: raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin") @@ -276,7 +278,7 @@ class TaskQueueManager: self._terminated = True def send_callback(self, method_name, *args, **kwargs): - for callback_plugin in self._callback_plugins: + for callback_plugin in [self._stdout_callback] + self._callback_plugins: # a plugin that set self.disabled to True will not be called # see osx_say.py example for such a plugin if getattr(callback_plugin, 'disabled', False): From 
ac1d1673be1968f21f158b8a1fda6249d0fb9b1e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 17:37:39 -0500 Subject: [PATCH 0462/1113] adhoc now respects limit when listing hosts also removed cruft about localhost as if used it is specified inline fixes #13848 --- lib/ansible/cli/adhoc.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 250241a848f..97df8fcdbf0 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -124,17 +124,13 @@ class AdHocCLI(CLI): inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) variable_manager.set_inventory(inventory) - hosts = inventory.list_hosts(pattern) - no_hosts = False - if len(hosts) == 0: - display.warning("provided hosts list is empty, only localhost is available") - no_hosts = True if self.options.subset: inventory.subset(self.options.subset) - if len(inventory.list_hosts(pattern)) == 0 and not no_hosts: - # Invalid limit - raise AnsibleError("Specified --limit does not match any hosts") + + hosts = inventory.list_hosts(pattern) + if len(hosts) == 0: + raise AnsibleError("Specified hosts options do not match any hosts") if self.options.listhosts: display.display(' hosts (%d):' % len(hosts)) From 041e1979c40725b55bbf9735e26fcaaea5cdec9f Mon Sep 17 00:00:00 2001 From: Jonathan Davila <jdavila@ansible.com> Date: Mon, 25 Jan 2016 17:40:20 -0500 Subject: [PATCH 0463/1113] Boto3 error handle fix --- lib/ansible/module_utils/ec2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index fdb326a7f1c..7a30955db8b 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -147,7 +147,9 @@ def get_aws_connection_info(module, boto3=False): elif boto3 and HAS_BOTO3: # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. region = botocore.session.get_session().get_config_variable('region') - + elif boto3 and not HAS_BOTO3: + module.fail_json("Boto3 is required for this module. Please install boto3 and try again") + if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] From 9df06624084f55b1adaff04a93008bb0ae0a3693 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 22:01:48 -0500 Subject: [PATCH 0464/1113] simpler conditional --- lib/ansible/module_utils/ec2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 7a30955db8b..2263ad86f45 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -144,12 +144,12 @@ def get_aws_connection_info(module, boto3=False): region = boto.config.get('Boto', 'aws_region') if not region: region = boto.config.get('Boto', 'ec2_region') - elif boto3 and HAS_BOTO3: + elif HAS_BOTO3: # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. region = botocore.session.get_session().get_config_variable('region') - elif boto3 and not HAS_BOTO3: + else: module.fail_json("Boto3 is required for this module. 
Please install boto3 and try again") - + if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] From c063803a91ac9c5d54c8113eff2d2d9b32875058 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 25 Jan 2016 22:20:55 -0500 Subject: [PATCH 0465/1113] raise AnsibleError as an 'expected' exception fixes #14065 --- lib/ansible/parsing/splitter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index feb0cd2b34b..fa26242fcfc 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -256,6 +256,6 @@ def split_args(args): # If we're done and things are not at zero depth or we're still inside quotes, # raise an error to indicate that the args were unbalanced if print_depth or block_depth or comment_depth or inside_quotes: - raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes") + raise AnsibleError("error while splitting arguments, either an unbalanced jinja2 block or quotes") return params From f4d68b8860678d710fc9e3ff04aa1b3736e28ab2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 25 Jan 2016 19:17:46 -0800 Subject: [PATCH 0466/1113] Transform tracebacks into unicode before printing Fixes #14042 --- bin/ansible | 3 ++- lib/ansible/executor/process/worker.py | 7 ++++--- lib/ansible/executor/task_queue_manager.py | 3 ++- lib/ansible/plugins/connection/winrm.py | 12 ++++++------ 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/bin/ansible b/bin/ansible index 627510a72e8..a02c5bc1745 100755 --- a/bin/ansible +++ b/bin/ansible @@ -40,6 +40,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError from ansible.utils.display import Display from ansible.utils.unicode import to_unicode + ######################################## ### OUTPUT OF LAST RESORT ### class LastResort(object): @@ -108,7 +109,7 @@ if __name__ == '__main__': have_cli_options = cli is not None and cli.options is not None display.error("Unexpected Exception: %s" % to_unicode(e), wrap_text=False) if not have_cli_options or have_cli_options and cli.options.verbosity > 2: - display.display("the full traceback was:\n\n%s" % traceback.format_exc()) + display.display(u"the full traceback was:\n\n%s" % to_unicode(traceback.format_exc())) else: display.display("to see the full traceback, use -vvv") sys.exit(250) diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index 73f5faa78b6..120bd8b1414 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -48,6 +48,7 @@ from ansible.playbook.task import Task from ansible.vars.unsafe_proxy import AnsibleJSONUnsafeDecoder from ansible.utils.debug import debug +from ansible.utils.unicode import to_unicode __all__ = ['WorkerProcess'] @@ -135,11 +136,11 @@ class WorkerProcess(multiprocessing.Process): try: self._host.vars = dict() self._host.groups = [] - task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout='')) + task_result = TaskResult(self._host, self._task, dict(failed=True, exception=to_unicode(traceback.format_exc()), stdout='')) self._rslt_q.put(task_result, block=False) except: - debug("WORKER EXCEPTION: %s" % e) - debug("WORKER EXCEPTION: %s" % traceback.format_exc()) + debug(u"WORKER EXCEPTION: %s" % to_unicode(e)) + debug(u"WORKER EXCEPTION: %s" 
% to_unicode(traceback.format_exc())) debug("WORKER PROCESS EXITING") diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index b5260c1f41a..64b952126fe 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -35,6 +35,7 @@ from ansible.plugins import callback_loader, strategy_loader, module_loader from ansible.template import Templar from ansible.vars.hostvars import HostVars from ansible.plugins.callback import CallbackBase +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -306,7 +307,7 @@ class TaskQueueManager: method(*args, **kwargs) except Exception as e: import traceback - orig_tb = traceback.format_exc() + orig_tb = to_unicode(traceback.format_exc()) try: v1_method = method.replace('v2_','') v1_method(*args, **kwargs) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index dec48787408..ef2f630f97a 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -137,20 +137,20 @@ class Connection(ConnectionBase): protocol.send_message('') return protocol except Exception as e: - err_msg = (str(e) or repr(e)).strip() - if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I): + err_msg = to_unicode(e).strip() + if re.search(ur'Operation\s+?timed\s+?out', err_msg, re.I): raise AnsibleError('the connection attempt timed out') - m = re.search(r'Code\s+?(\d{3})', err_msg) + m = re.search(ur'Code\s+?(\d{3})', err_msg) if m: code = int(m.groups()[0]) if code == 401: err_msg = 'the username/password specified for this server was incorrect' elif code == 411: return protocol - errors.append('%s: %s' % (transport, err_msg)) - display.vvvvv('WINRM CONNECTION ERROR: %s\n%s' % (err_msg, traceback.format_exc()), host=self._winrm_host) + errors.append(u'%s: %s' % (transport, err_msg)) + display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_unicode(traceback.format_exc())), host=self._winrm_host) if errors: - raise AnsibleError(', '.join(errors)) + raise AnsibleError(', '.join(to_str(errors))) else: raise AnsibleError('No transport found for WinRM connection') From 3cf59d30f72641ab51e0777e066cad1293295c67 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 18 Jan 2016 13:26:54 -0800 Subject: [PATCH 0467/1113] For synchronize, fix sudo to execute on the remote end of the connection * In 2.0.0.x become was reversed for synchronize. It was happening on the local machine instead of the remote machine. This restores the ansible-1.9.x behaviour of doing become on the remote machine. However, there's aspects of this that are hacky (no hackier than ansible-1.9 but not using 2.0 features). The big problem is that it does not understand any become method except sudo. I'm willing to use a partial fix now because we don't want people to get used to the reversed semantics in their playbooks. 
* synchronize copying to the wrong host when inventory_hostname is localhost * Fix problem with unicode arguments (first seen as a bug on synchronize) Fixes #14041 Fixes #13825 --- lib/ansible/module_utils/basic.py | 4 +- lib/ansible/plugins/action/synchronize.py | 70 +++++++++++++++++------ 2 files changed, 55 insertions(+), 19 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 42ea8e79060..1da60ac3816 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1788,7 +1788,9 @@ class AnsibleModule(object): elif isinstance(args, basestring) and use_unsafe_shell: shell = True elif isinstance(args, basestring): - args = shlex.split(args.encode('utf-8')) + if isinstance(args, unicode): + args = args.encode('utf-8') + args = shlex.split(args) else: msg = "Argument 'args' to run_command must be list or string" self.fail_json(rc=257, cmd=args, msg=msg) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 45004d5ed4e..a91fa064e17 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -71,6 +71,8 @@ class ActionModule(ActionBase): def _process_remote(self, host, path, user): transport = self._play_context.connection if host not in C.LOCALHOST or transport != "local": + if host in C.LOCALHOST: + self._task.args['_substitute_controller'] = True return self._format_rsync_rsh_target(host, path, user) if ':' not in path and not path.startswith('/'): @@ -103,14 +105,40 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): ''' generates params and passes them on to the rsync module ''' + # When modifying this function be aware of the tricky convolutions + # your thoughts have to go through: + # + # In normal ansible, we connect from controller to inventory_hostname + # (playbook's hosts: field) or controller to delegate_to host and run + # a module on one of those hosts. + # + # So things that are directly related to the core of ansible are in + # terms of that sort of connection that always originate on the + # controller. + # + # In synchronize we use ansible to connect to either the controller or + # to the delegate_to host and then run rsync which makes its own + # connection from controller to inventory_hostname or delegate_to to + # inventory_hostname. + # + # That means synchronize needs to have some knowledge of the + # controller to inventory_host/delegate host that ansible typically + # establishes and use those to construct a command line for rsync to + # connect from the inventory_host to the controller/delegate. The + # challenge for coders is remembering which leg of the trip is + # associated with the conditions that you're checking at any one time. if task_vars is None: task_vars = dict() result = super(ActionModule, self).run(tmp, task_vars) - original_transport = task_vars.get('ansible_connection') or self._play_context.connection + # self._play_context.connection accounts for delegate_to so + # remote_transport is the transport ansible thought it would need + # between the controller and the delegate_to host or the controller + # and the remote_host if delegate_to isn't set. 
+ remote_transport = False - if original_transport != 'local': + if self._play_context.connection != 'local': remote_transport = True try: @@ -136,7 +164,14 @@ class ActionModule(ActionBase): except KeyError: dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname) - dest_is_local = dest_host in C.LOCALHOST + # dest_is_local tells us if the host rsync runs on is the same as the + # host rsync puts the files on. This is about *rsync's connection*, + # not about the ansible connection to run the module. + dest_is_local = False + if not delegate_to and remote_transport is False: + dest_is_local = True + elif delegate_to and delegate_to == dest_host: + dest_is_local = True # CHECK FOR NON-DEFAULT SSH PORT if self._task.args.get('dest_port', None) is None: @@ -161,23 +196,13 @@ class ActionModule(ActionBase): # Delegate to localhost as the source of the rsync unless we've been # told (via delegate_to) that a different host is the source of the # rsync - transport_overridden = False if not use_delegate and remote_transport: # Create a connection to localhost to run rsync on new_stdin = self._connection._new_stdin new_connection = connection_loader.get('local', self._play_context, new_stdin) self._connection = new_connection - transport_overridden = True self._override_module_replaced_vars(task_vars) - # COMPARE DELEGATE, HOST AND TRANSPORT - between_multiple_hosts = False - if dest_host != src_host and remote_transport: - # We're not copying two filesystem trees on the same host so we - # need to correctly format the paths for rsync (like - # user@host:path/to/tree - between_multiple_hosts = True - # SWITCH SRC AND DEST HOST PER MODE if self._task.args.get('mode', 'push') == 'pull': (dest_host, src_host) = (src_host, dest_host) @@ -185,7 +210,7 @@ class ActionModule(ActionBase): # MUNGE SRC AND DEST PER REMOTE_HOST INFO src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) - if between_multiple_hosts: + if not dest_is_local: # Private key handling if use_delegate: private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file @@ -231,9 +256,18 @@ class ActionModule(ActionBase): # Allow custom rsync path argument rsync_path = self._task.args.get('rsync_path', None) - # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument - if not rsync_path and transport_overridden and self._play_context.become and self._play_context.become_method == 'sudo' and not dest_is_local: - rsync_path = 'sudo rsync' + if not dest_is_local: + if self._play_context.become and not rsync_path: + # If no rsync_path is set, become was originally set, and dest is + # remote then add privilege escalation here. + if self._play_context.become_method == 'sudo': + rsync_path = 'sudo rsync' + # TODO: have to add in the rest of the become methods here + + # We cannot use privilege escalation on the machine running the + # module. Instead we run it on the machine rsync is connecting + # to. + self._play_context.become = False # make sure rsync path is quoted. 
if rsync_path: @@ -245,7 +279,7 @@ class ActionModule(ActionBase): # run the module and store the result result.update(self._execute_module('synchronize', task_vars=task_vars)) - if 'SyntaxError' in result['msg']: + if 'SyntaxError' in result.get('exception', result.get('msg', '')): # Emit a warning about using python3 because synchronize is # somewhat unique in running on localhost result['traceback'] = result['msg'] From d0a062ffa35c7179cf1238bdcb901c0918e59782 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 25 Jan 2016 19:46:57 -0800 Subject: [PATCH 0468/1113] python3 doesn't have raw explicit-unicode literals. Workaround it with a raw native string that we make unicode in py2. --- lib/ansible/plugins/connection/winrm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index ef2f630f97a..c291e21782c 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -138,9 +138,9 @@ class Connection(ConnectionBase): return protocol except Exception as e: err_msg = to_unicode(e).strip() - if re.search(ur'Operation\s+?timed\s+?out', err_msg, re.I): + if re.search(to_unicode(r'Operation\s+?timed\s+?out'), err_msg, re.I): raise AnsibleError('the connection attempt timed out') - m = re.search(ur'Code\s+?(\d{3})', err_msg) + m = re.search(to_unicode(r'Code\s+?(\d{3})'), err_msg) if m: code = int(m.groups()[0]) if code == 401: From c44110bc81f05fcbedf3dfc429dfe7406b13d86a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 25 Jan 2016 19:57:28 -0800 Subject: [PATCH 0469/1113] Update submodule refs to pick up the synchronize internal parameter --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 09e2457eb0e..25414bbd1b2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 09e2457eb0e811ac293065dd77cd31597ceb2da7 +Subproject commit 25414bbd1b28241a029a9f8875fc6349b090fc2d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e8427cb32a0..e13942f9c57 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e8427cb32a07ebaa4682192675a075fc336f6564 +Subproject commit e13942f9c572080f36e76a8c22f7000ffeedac94 From 5b293b56d676eea6b143932d5e66321f568eb589 Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Mon, 25 Jan 2016 12:50:11 +0100 Subject: [PATCH 0470/1113] Handle the key 'prepared' in the 'diff' result from modules So far, when a 'diff' dict is returned with module results, it is checked for 'before' and 'after' texts, which are processed in _get_diff() by python difflib. This generates the changes to display when CLI users specify --diff. However, some modules will generate changes that cannot easily be expressed in a conventional diff. One example is the output of the synchronize module, which presents changed files in a common log format as in `rsync --itemize-changes`. Add a check for a diff['prepared'] key, which can contain prepared diff text from modules. 
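
As an illustrative aside on the commit message above (a hypothetical snippet, not part of this patch): a module can hand such pre-rendered text back through its result, and the callback change below then prints it when --diff is used. Only the shape of the returned dict matters here; the itemized string is made up::

    # Hypothetical module fragment showing the result shape; 'itemized'
    # stands in for output such as that of rsync --itemize-changes.
    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(argument_spec=dict())
        itemized = '>f.st...... some/changed/file\n'
        # 'prepared' carries already-formatted diff text, shown as-is under --diff
        module.exit_json(changed=True, diff={'prepared': itemized})

    if __name__ == '__main__':
        main()
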
--- lib/ansible/plugins/callback/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index faf04b1180f..bb24577d648 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -131,6 +131,8 @@ class CallbackBase: differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10) ret.extend(list(differ)) ret.append('\n') + if 'prepared' in diff: + ret.append(to_unicode(diff['prepared'])) return u"".join(ret) except UnicodeDecodeError: ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n") From 66104191d182aedd7b76f4a1b839c448f97c3ffe Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 26 Jan 2016 11:22:27 -0500 Subject: [PATCH 0471/1113] fixed permissions check for ansible.log fixes #13990 --- lib/ansible/utils/display.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 3703c15540b..ba927237599 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -48,18 +48,17 @@ except NameError: # These are module level as we currently fork and serialize the whole process and locks in the objects don't play well with that debug_lock = Lock() +logger = None #TODO: make this a logging callback instead if C.DEFAULT_LOG_PATH: path = C.DEFAULT_LOG_PATH - if (os.path.exists(path) and not os.access(path, os.W_OK)) or not os.access(os.path.dirname(path), os.W_OK): - print("[WARNING]: log file at %s is not writeable, aborting\n" % path, file=sys.stderr) - - logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s') - mypid = str(os.getpid()) - user = getpass.getuser() - logger = logging.getLogger("p=%s u=%s | " % (mypid, user)) -else: - logger = None + if (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK): + logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s') + mypid = str(os.getpid()) + user = getpass.getuser() + logger = logging.getLogger("p=%s u=%s | " % (mypid, user)) + else: + print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr) class Display: From 07a9a54b0efb8d15bb03fbb3bf6477318d53b8e6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 26 Jan 2016 13:10:23 -0500 Subject: [PATCH 0472/1113] Fix lookup of parent attribute when parent doesn't have the attr Fixes #14100 --- lib/ansible/playbook/block.py | 21 ++++++++------------- lib/ansible/playbook/task.py | 6 +----- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index b31ffbcfe8b..095e6b338db 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -298,18 +298,18 @@ class Block(Base, Become, Conditional, Taggable): value = self._extend_value(value, parent_value) else: value = parent_value - if self._role and (value is None or extend): - parent_value = getattr(self._role, attr) + if self._role and (value is None or extend) and hasattr(self._role, attr): + parent_value = getattr(self._role, attr, None) if extend: value = self._extend_value(value, parent_value) else: value = parent_value - if len(self._dep_chain) and (not 
value or extend): + if len(self._dep_chain) and (value is None or extend): reverse_dep_chain = self._dep_chain[:] reverse_dep_chain.reverse() for dep in reverse_dep_chain: - dep_value = getattr(dep, attr) + dep_value = getattr(dep, attr, None) if extend: value = self._extend_value(value, dep_value) else: @@ -317,14 +317,13 @@ class Block(Base, Become, Conditional, Taggable): if value is not None and not extend: break - - if self._play and (value is None or extend): - parent_value = getattr(self._play, attr) + if self._play and (value is None or extend) and hasattr(self._play, attr): + parent_value = getattr(self._play, attr, None) if extend: value = self._extend_value(value, parent_value) else: value = parent_value - except KeyError: + except KeyError as e: pass return value @@ -344,11 +343,7 @@ class Block(Base, Become, Conditional, Taggable): ''' Override for the 'tags' getattr fetcher, used from Base. ''' - any_errors_fatal = self._attributes['any_errors_fatal'] - if hasattr(self, '_get_parent_attribute'): - if self._get_parent_attribute('any_errors_fatal'): - any_errors_fatal = True - return any_errors_fatal + return self._get_parent_attribute('any_errors_fatal') def filter_tagged_tasks(self, play_context, all_vars): ''' diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 154ff53d5e3..6bd3caaca5b 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -419,9 +419,5 @@ class Task(Base, Conditional, Taggable, Become): ''' Override for the 'tags' getattr fetcher, used from Base. ''' - any_errors_fatal = self._attributes['any_errors_fatal'] - if hasattr(self, '_get_parent_attribute'): - if self._get_parent_attribute('any_errors_fatal'): - any_errors_fatal = True - return any_errors_fatal + return self._get_parent_attribute('any_errors_fatal') From 78d499140c943b6bc806c222799060892bca8ae7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 26 Jan 2016 14:11:28 -0500 Subject: [PATCH 0473/1113] Re-implementing the retry file feature for 2.0 Fixes #13944 --- lib/ansible/constants.py | 2 +- lib/ansible/executor/playbook_executor.py | 36 +++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 9b84825d6bc..d277c717b54 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -235,7 +235,7 @@ COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'AN DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) -RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/', ispath=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True) DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True) # CONNECTION RELATED diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index bcfe1bebbe9..42dca10c065 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, 
print_function) __metaclass__ = type +import StringIO import getpass import locale import os @@ -27,6 +28,7 @@ import sys from ansible.compat.six import string_types +from ansible import constants as C from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook from ansible.template import Templar @@ -171,6 +173,20 @@ class PlaybookExecutor: if entry: entrylist.append(entry) # per playbook + if C.RETRY_FILES_ENABLED: + retries = list(set(self._tqm._failed_hosts.keys() + self._tqm._unreachable_hosts.keys())) + retries.sort() + if len(retries) > 0: + if C.RETRY_FILES_SAVE_PATH: + basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) + else: + basedir = os.path.dirname(playbook_path) + + (retry_name, _) = os.path.splitext(os.path.basename(playbook_path)) + filename = os.path.join(basedir, "%s.retry" % retry_name) + if self._generate_retry_inventory(filename, retries): + display.display("\tto retry, use: --limit @%s\n" % filename) + # send the stats callback for this playbook if self._tqm is not None: self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) @@ -233,3 +249,23 @@ class PlaybookExecutor: return serialized_batches + def _generate_retry_inventory(self, retry_path, replay_hosts): + ''' + Called when a playbook run fails. It generates an inventory which allows + re-running on ONLY the failed hosts. This may duplicate some variable + information in group_vars/host_vars but that is ok, and expected. + ''' + + buf = StringIO.StringIO() + for x in replay_hosts: + buf.write("%s\n" % x) + + try: + fd = open(retry_path, 'w') + fd.write(buf.getvalue()) + fd.close() + except Exception as e: + display.error("Could not create retry file '%s'. The error was: %s" % (retry_path, e)) + return False + + return True From 3ed3a5f43aeba8d7cbad11e93ef76a66678450d5 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 26 Jan 2016 14:52:41 -0500 Subject: [PATCH 0474/1113] Make retry file generation not use StringIO --- lib/ansible/executor/playbook_executor.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 42dca10c065..d109b5296a5 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -19,7 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import StringIO import getpass import locale import os @@ -256,14 +255,10 @@ class PlaybookExecutor: information in group_vars/host_vars but that is ok, and expected. ''' - buf = StringIO.StringIO() - for x in replay_hosts: - buf.write("%s\n" % x) - try: - fd = open(retry_path, 'w') - fd.write(buf.getvalue()) - fd.close() + with open(retry_path, 'w') as fd: + for x in replay_hosts: + fd.write("%s\n" % x) except Exception as e: display.error("Could not create retry file '%s'. The error was: %s" % (retry_path, e)) return False From cb2b19ced8b93b6688370f895c2183ecbd98b3ae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 26 Jan 2016 13:53:42 -0800 Subject: [PATCH 0475/1113] Fix for inventory hosts on localhost with alternate ports. 
Fixes https://github.com/ansible/ansible-modules-core/issues/2831 --- lib/ansible/plugins/action/synchronize.py | 30 ++++++++++++++++++++--- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index a91fa064e17..c0b50eff971 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -68,10 +68,21 @@ class ActionModule(ActionBase): path = self._get_absolute_path(path=path) return path - def _process_remote(self, host, path, user): + def _process_remote(self, host, path, user, port_matches_localhost_port): + """ + :arg host: hostname for the path + :arg path: file path + :arg user: username for the transfer + :arg port_matches_localhost_port: boolean whether the remote port + matches the port used by localhost's sshd. This is used in + conjunction with seeing whether the host is localhost to know + if we need to have the module substitute the pathname or if it + is a different host (for instance, an ssh tunnelled port or an + alternative ssh port to a vagrant host.) + """ transport = self._play_context.connection if host not in C.LOCALHOST or transport != "local": - if host in C.LOCALHOST: + if port_matches_localhost_port and host in C.LOCALHOST: self._task.args['_substitute_controller'] = True return self._format_rsync_rsh_target(host, path, user) @@ -164,6 +175,17 @@ class ActionModule(ActionBase): except KeyError: dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname) + localhost_ports = set() + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for port_var in ('ansible_port', 'ansible_ssh_port'): + port = localhost_vars.get(port_var, None) + if port: + break + else: + port = C.DEFAULT_REMOTE_PORT + localhost_ports.add(port) + # dest_is_local tells us if the host rsync runs on is the same as the # host rsync puts the files on. This is about *rsync's connection*, # not about the ansible connection to run the module. @@ -236,12 +258,12 @@ class ActionModule(ActionBase): # use the mode to define src and dest's url if self._task.args.get('mode', 'push') == 'pull': # src is a remote path: <user>@<host>, dest is a local path - src = self._process_remote(src_host, src, user) + src = self._process_remote(src_host, src, user, inv_port in localhost_ports) dest = self._process_origin(dest_host, dest, user) else: # src is a local path, dest is a remote path: <user>@<host> src = self._process_origin(src_host, src, user) - dest = self._process_remote(dest_host, dest, user) + dest = self._process_remote(dest_host, dest, user, inv_port in localhost_ports) else: # Still need to munge paths (to account for roles) even if we aren't # copying files between hosts From 326ae2108913e275388d8a0867512a120c66ccbd Mon Sep 17 00:00:00 2001 From: Stig Telfer <stig@telfer.org> Date: Wed, 27 Jan 2016 12:02:59 +0000 Subject: [PATCH 0476/1113] Details on how to convert subnet masks into CIDR Use data reported by Ansible network facts as an example. 
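
As a side note (not part of this patch), the ipaddr() filter is backed by the netaddr Python library, so the conversion documented below can be sketched directly in Python for anyone wanting to verify a value outside a template; this is only a rough equivalent, assuming netaddr is installed::

    # Rough Python equivalent of the filter examples added below, using
    # netaddr (the library the ipaddr() filter relies on).
    from netaddr import IPNetwork

    net_mask = IPNetwork('192.168.0.0/255.255.255.0')
    print(net_mask.prefixlen)   # 24
    print(net_mask.cidr)        # 192.168.0.0/24
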
--- docsite/rst/playbooks_filters_ipaddr.rst | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/docsite/rst/playbooks_filters_ipaddr.rst b/docsite/rst/playbooks_filters_ipaddr.rst index ce5cdaa0216..d5f9391db81 100644 --- a/docsite/rst/playbooks_filters_ipaddr.rst +++ b/docsite/rst/playbooks_filters_ipaddr.rst @@ -283,6 +283,37 @@ If needed, you can extract subnet and prefix information from 'host/prefix' valu # {{ host_prefix | ipaddr('host/prefix') | ipaddr('prefix') }} [64, 24] +Converting subnet masks to CIDR notation +---------------------------------------- + +Given a subnet in the form of network address and subnet mask, it can be converted into CIDR notation using ``ipaddr()``. This can be useful for converting Ansible facts gathered about network configuration from subnet masks into CIDR format:: + + ansible_default_ipv4: { + address: "192.168.0.11", + alias: "eth0", + broadcast: "192.168.0.255", + gateway: "192.168.0.1", + interface: "eth0", + macaddress: "fa:16:3e:c4:bd:89", + mtu: 1500, + netmask: "255.255.255.0", + network: "192.168.0.0", + type: "ether" + } + +First concatenate network and netmask:: + + net_mask = "{{ ansible_default_ipv4.network }}/{{ ansible_default_ipv4.netmask }}" + '192.168.0.0/255.255.255.0' + +This result can be canonicalised with ``ipaddr()`` to produce a subnet in CIDR format:: + + # {{ net_mask | ipaddr('prefix') }} + '24' + + # {{ net_mask | ipaddr('net') }} + '192.168.0.0/24' + IP address conversion --------------------- From a61e51efb75f5d113d226dbbb0dd42581a141f31 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 27 Jan 2016 09:56:19 -0500 Subject: [PATCH 0477/1113] Don't try to generate retry files if the tqm hasn't been initialized Fixes #14144 --- lib/ansible/executor/playbook_executor.py | 28 +++++++++++------------ 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index d109b5296a5..30d9ad6d6b8 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -172,22 +172,22 @@ class PlaybookExecutor: if entry: entrylist.append(entry) # per playbook - if C.RETRY_FILES_ENABLED: - retries = list(set(self._tqm._failed_hosts.keys() + self._tqm._unreachable_hosts.keys())) - retries.sort() - if len(retries) > 0: - if C.RETRY_FILES_SAVE_PATH: - basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) - else: - basedir = os.path.dirname(playbook_path) - - (retry_name, _) = os.path.splitext(os.path.basename(playbook_path)) - filename = os.path.join(basedir, "%s.retry" % retry_name) - if self._generate_retry_inventory(filename, retries): - display.display("\tto retry, use: --limit @%s\n" % filename) - # send the stats callback for this playbook if self._tqm is not None: + if C.RETRY_FILES_ENABLED: + retries = list(set(self._tqm._failed_hosts.keys() + self._tqm._unreachable_hosts.keys())) + retries.sort() + if len(retries) > 0: + if C.RETRY_FILES_SAVE_PATH: + basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) + else: + basedir = os.path.dirname(playbook_path) + + (retry_name, _) = os.path.splitext(os.path.basename(playbook_path)) + filename = os.path.join(basedir, "%s.retry" % retry_name) + if self._generate_retry_inventory(filename, retries): + display.display("\tto retry, use: --limit @%s\n" % filename) + self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) # if the last result wasn't zero, break out of the playbook file name loop From 
c59916c43da70946f66174547bee5c6e0109715f Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 27 Jan 2016 10:51:23 -0500 Subject: [PATCH 0478/1113] fixes issue with eos shared module argument precedence The eos shared module should prefer to use explicit task arguments over arguments provided through the provider. This fixes a problem where that was not the case --- lib/ansible/module_utils/eos.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index a89869dced4..c5446dc5394 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -148,7 +148,8 @@ class NetworkModule(AnsibleModule): provider = params.get('provider') or dict() for key, value in provider.items(): if key in NET_COMMON_ARGS.keys(): - params[key] = value + if not params.get(key) and value is not None: + params[key] = value return params def connect(self): From 4fa6902c960df565d2475e78d080d620ee0bfd7f Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 27 Jan 2016 11:09:40 -0500 Subject: [PATCH 0479/1113] address minor bugs in nxos shared module This addresses two issues with the nxos shared module. The first issue is argument precedence checking. The module should prefer explicit arguments over arguments passed vi the provider. This is now fixed to honor that precedence. The second issue is collecting output from nxapi and returning the response. Prior to this change the entire json structure was returned. Now just the output is returned to align it better with cli based output --- lib/ansible/module_utils/nxos.py | 42 ++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index d8eb0f97de4..7a523dbae46 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -23,7 +23,7 @@ NET_COMMON_ARGS = dict( port=dict(type='int'), username=dict(required=True), password=dict(no_log=True), - transport=dict(choices=['cli', 'nxapi']), + transport=dict(default='cli', choices=['cli', 'nxapi']), use_ssl=dict(default=False, type='bool'), provider=dict() ) @@ -107,11 +107,24 @@ class Nxapi(object): self.module.fail_json(**headers) response = self.module.from_json(response.read()) - if 'error' in response: - err = response['error'] - self.module.fail_json(msg='json-rpc error % ' % str(err)) + result = list() - return response + output = response['ins_api']['outputs']['output'] + if isinstance(output, list): + for item in response['ins_api']['outputs']['output']: + if item['code'] != '200': + self.module.fail_json(msg=item['msg'], command=item['input'], + code=item['code']) + else: + result.append(item['body']) + elif output['code'] != '200': + self.module.fail_json(msg=item['msg'], command=item['input'], + code=item['code']) + else: + result.append(output['body']) + + + return result class Cli(object): @@ -150,7 +163,8 @@ class NetworkModule(AnsibleModule): provider = params.get('provider') or dict() for key, value in provider.items(): if key in NET_COMMON_ARGS.keys(): - params[key] = value + if not params.get(key) and value is not None: + params[key] = value return params def connect(self): @@ -159,11 +173,9 @@ class NetworkModule(AnsibleModule): else: self.connection = Cli(self) - try: - self.connection.connect() + self.connection.connect() + if self.params['transport'] == 'cli': self.execute('terminal length 0') - except Exception, exc: - 
self.fail_json(msg=exc.message) def configure(self, commands): commands = to_list(commands) @@ -176,10 +188,7 @@ class NetworkModule(AnsibleModule): return responses def execute(self, commands, **kwargs): - try: - return self.connection.send(commands, **kwargs) - except Exception, exc: - self.fail_json(msg=exc.message) + return self.connection.send(commands, **kwargs) def disconnect(self): self.connection.close() @@ -194,10 +203,7 @@ class NetworkModule(AnsibleModule): if self.params['transport'] == 'cli': return self.execute(cmd)[0] else: - resp = self.execute(cmd) - if not resp.get('ins_api').get('outputs').get('output').get('body'): - self.fail_json(msg="Unrecognized response: %s" % str(resp)) - return resp['ins_api']['outputs']['output']['body'] + return self.execute(cmd) def get_module(**kwargs): """Return instance of NetworkModule From 6bf2f45ff52d252dbada6a1860416fa603be56bd Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 27 Jan 2016 14:09:14 -0500 Subject: [PATCH 0480/1113] fix for so su works in more cases should not fail anymore on csh, fish nor the BSDs fixes #14116 --- lib/ansible/playbook/play_context.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 409f9661b8a..1804a032c62 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -451,8 +451,10 @@ class PlayContext(Base): if self.become_method == 'sudo': # If we have a password, we run sudo with a randomly-generated - # prompt set using -p. Otherwise we run it with -n, which makes + # prompt set using -p. Otherwise we run it with default -n, which makes # it fail if it would have prompted for a password. + # Cannot rely on -n as it can be removed from defaults, which should be + # done for older versions of sudo that do not support the option. # # Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() @@ -468,12 +470,14 @@ class PlayContext(Base): elif self.become_method == 'su': + # passing code ref to examine prompt as simple string comparisson isn't good enough with su def detect_su_prompt(data): SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' 
for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE) return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) - prompt = detect_su_prompt - becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) + + su_success_cmd = '%s -c %s' % (executable, success_cmd) # this is here cause su too succeptible to overquoting + becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, su_success_cmd) #works with sh elif self.become_method == 'pbrun': From a381c1bbd671172834015c62f50c7e7225efb827 Mon Sep 17 00:00:00 2001 From: Kamil Szczygiel <kamil.szczygiel@intel.com> Date: Wed, 27 Jan 2016 20:41:28 +0100 Subject: [PATCH 0481/1113] added skip_ssl argument for VMware module to skip SSL verification (required when using self signed certificates) --- lib/ansible/module_utils/vmware.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index 6eb612de744..ca0440a26c3 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -21,6 +21,7 @@ try: import atexit import time + import ssl # requests is required for exception handling of the ConnectionError import requests from pyVim import connect @@ -104,6 +105,7 @@ def vmware_argument_spec(): hostname=dict(type='str', required=True), username=dict(type='str', aliases=['user', 'admin'], required=True), password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), + skip_ssl=dict(type='bool', required=False, default=False), ) @@ -112,8 +114,15 @@ def connect_to_api(module, disconnect_atexit=True): hostname = module.params['hostname'] username = module.params['username'] password = module.params['password'] + skip_ssl = module.params['skip_ssl'] + try: - service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) + if skip_ssl: + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.verify_mode = ssl.CERT_NONE + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) + else: + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) # Disabling atexit should be used in special cases only. # Such as IP change of the ESXi host which removes the connection anyway. From 3f3e3e3d5bdf042a0ef733477673fa703d97ccba Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 27 Jan 2016 12:48:15 -0800 Subject: [PATCH 0482/1113] Return an error when synchronize is used with something other than an ssh connection Suggested in #2832 --- lib/ansible/plugins/action/synchronize.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index c0b50eff971..bbd67dffecf 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -157,6 +157,13 @@ class ActionModule(ActionBase): except (AttributeError, KeyError): delegate_to = None + # ssh paramiko and local are fully supported transports. Anything + # else only works with delegate_to + if delegate_to is None and self._play_context.connection not in ('ssh', 'paramiko', 'smart', 'local'): + result['failed'] = True + result['msg'] = "synchronize uses rsync to function. rsync needs to connect to the remote host via ssh or a direct filesystem copy. This remote host is being accessed via %s instead so it cannot work." 
% self._play_context.connection + return result + use_ssh_args = self._task.args.pop('use_ssh_args', None) # Parameter name needed by the ansible module From c857b2004319786ff1529c3ea74470ffa1911004 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 27 Jan 2016 16:12:08 -0500 Subject: [PATCH 0483/1113] fixed issue with vars prompt warning causing error sometimes display object is not magically available, use it explicitly, no need to be classmethod anymore fixes #14147 --- lib/ansible/utils/display.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index ba927237599..cd91af1a4f2 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -278,13 +278,12 @@ class Display: else: return input(prompt_string) - @classmethod - def do_var_prompt(cls, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): result = None if sys.__stdin__.isatty(): - do_prompt = cls.prompt + do_prompt = self.prompt if prompt and default is not None: msg = "%s [%s]: " % (prompt, default) @@ -299,12 +298,12 @@ class Display: second = do_prompt("confirm " + msg, private) if result == second: break - display.display("***** VALUES ENTERED DO NOT MATCH ****") + self.display("***** VALUES ENTERED DO NOT MATCH ****") else: result = do_prompt(msg, private) else: result = None - display.warning("Not prompting as we are not in interactive mode") + self.warning("Not prompting as we are not in interactive mode") # if result is false and default is not None if not result and default is not None: From a928465c9ee9bf742729ac22078b07d420b20c91 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 27 Jan 2016 16:26:44 -0500 Subject: [PATCH 0484/1113] added note about 1.9 ppa --- docsite/rst/intro_installation.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 99e2661226c..d132752f36a 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -222,6 +222,7 @@ To configure the PPA on your machine and install ansible run these commands: $ sudo apt-get update $ sudo apt-get install ansible +.. note:: For the older version 1.9 we use this ppa:ansible/ansible-1.9 .. note:: On older Ubuntu distributions, "software-properties-common" is called "python-software-properties". 
Debian/Ubuntu packages can also be built from the source checkout, run: From fad6eb18448736ad1308f622be8e138fcf259288 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 27 Jan 2016 16:49:30 -0500 Subject: [PATCH 0485/1113] fixed unit test to match new output of become/su --- test/units/playbook/test_play_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py index 5434ef30004..cc9441dab87 100644 --- a/test/units/playbook/test_play_context.py +++ b/test/units/playbook/test_play_context.py @@ -149,7 +149,7 @@ class TestPlayContext(unittest.TestCase): play_context.become_method = 'su' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s -c "%s -c '"'"'echo %s; %s'"'"'"'""" % (default_exe, su_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd)) + self.assertEqual(cmd, """%s -c '%s %s -c %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, su_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd)) play_context.become_method = 'pbrun' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") From 0e410bbc8a6156d696edbebc5f07a23f454dd49c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 27 Jan 2016 20:17:52 -0800 Subject: [PATCH 0486/1113] Squashing was occuring even though pkgs didn't have a template that would be affected by squash This broke other uses of looping (looping for delegate_to in the reported bug) Fixes #13980 --- lib/ansible/executor/task_executor.py | 33 +++++++++++++++-------- test/units/executor/test_task_executor.py | 27 +++++++++++++++++++ 2 files changed, 49 insertions(+), 11 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 1417bc9d2c4..6350cc2b0e3 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -269,28 +269,39 @@ class TaskExecutor: if all(isinstance(o, string_types) for o in items): final_items = [] name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) - # The user is doing an upgrade or some other operation - # that doesn't take name or pkg. 
- if name: + + # This gets the information to check whether the name field + # contains a template that we can squash for + template_no_item = template_with_item = None + if templar._contains_vars(name): + variables['item'] = '\0$' + template_no_item = templar.template(name, variables, cache=False) + variables['item'] = '\0@' + template_with_item = templar.template(name, variables, cache=False) + del variables['item'] + + # Check if the user is doing some operation that doesn't take + # name/pkg or the name/pkg field doesn't have any variables + # and thus the items can't be squashed + if name and (template_no_item != template_with_item): for item in items: variables['item'] = item if self._task.evaluate_conditional(templar, variables): - if templar._contains_vars(name): - new_item = templar.template(name, cache=False) - final_items.append(new_item) - else: - final_items.append(item) + new_item = templar.template(name, cache=False) + final_items.append(new_item) self._task.args['name'] = final_items + # Wrap this in a list so that the calling function loop + # executes exactly once return [final_items] + else: + # Restore the name parameter + self._task.args['name'] = name #elif: # Right now we only optimize single entries. In the future we # could optimize more types: # * lists can be squashed together # * dicts could squash entries that match in all cases except the # name or pkg field. - # Note: we really should be checking that the name or pkg field - # contains a template that expands with our with_items values. - # If it doesn't then we may break things return items def _execute(self, variables=None): diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py index 0c868fef4b9..7135a39ae2a 100644 --- a/test/units/executor/test_task_executor.py +++ b/test/units/executor/test_task_executor.py @@ -198,21 +198,47 @@ class TestTaskExecutor(unittest.TestCase): shared_loader_obj = mock_shared_loader, ) + # + # No replacement + # + mock_task.action = 'yum' + new_items = te._squash_items(items=items, variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + mock_task.action = 'foo' + mock_task.args={'name': '{{item}}'} new_items = te._squash_items(items=items, variables=job_vars) self.assertEqual(new_items, ['a', 'b', 'c']) mock_task.action = 'yum' + mock_task.args={'name': 'static'} + new_items = te._squash_items(items=items, variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + + mock_task.action = 'yum' + mock_task.args={'name': '{{pkg_mgr}}'} + new_items = te._squash_items(items=items, variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + + # + # Replaces + # + mock_task.action = 'yum' + mock_task.args={'name': '{{item}}'} new_items = te._squash_items(items=items, variables=job_vars) self.assertEqual(new_items, [['a','c']]) mock_task.action = '{{pkg_mgr}}' + mock_task.args={'name': '{{item}}'} new_items = te._squash_items(items=items, variables=job_vars) self.assertEqual(new_items, [['a', 'c']]) + # # Smoketests -- these won't optimize but make sure that they don't # traceback either + # mock_task.action = '{{unknown}}' + mock_task.args={'name': '{{item}}'} new_items = te._squash_items(items=items, variables=job_vars) self.assertEqual(new_items, ['a', 'b', 'c']) @@ -220,6 +246,7 @@ class TestTaskExecutor(unittest.TestCase): dict(name='b', state='present'), dict(name='c', state='present')] mock_task.action = 'yum' + mock_task.args={'name': '{{item}}'} new_items = te._squash_items(items=items, 
variables=job_vars) self.assertEqual(new_items, items) From 90c14644b0897b85b6f466f6b795cccd6a2e054c Mon Sep 17 00:00:00 2001 From: Darren Tong <darren@getcontrol.co> Date: Wed, 27 Jan 2016 23:22:27 -0800 Subject: [PATCH 0487/1113] Fixing typo depricated for deprecated. --- docsite/rst/intro_getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst index 7b783209def..e79d075a73e 100644 --- a/docsite/rst/intro_getting_started.rst +++ b/docsite/rst/intro_getting_started.rst @@ -33,7 +33,7 @@ In releases up to and including Ansible 1.2, the default was strictly paramiko. Occasionally you'll encounter a device that doesn't support SFTP. This is rare, but should it occur, you can switch to SCP mode in :doc:`intro_configuration`. -When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-become-pass`` (previously ``--ask-sudo-pass`` which has been depricated). +When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-become-pass`` (previously ``--ask-sudo-pass`` which has been deprecated). While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet. From ba9e5fa6ba1f2d210a52a1dea73fe89c6222783b Mon Sep 17 00:00:00 2001 From: Scott Radvan <scott@radsy.com> Date: Thu, 28 Jan 2016 21:58:49 +1300 Subject: [PATCH 0488/1113] grammar fixes; capitalize where necessary; general clean-up --- docsite/rst/intro_bsd.rst | 55 +++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/docsite/rst/intro_bsd.rst b/docsite/rst/intro_bsd.rst index ba0e07f2c86..26cfb985b81 100644 --- a/docsite/rst/intro_bsd.rst +++ b/docsite/rst/intro_bsd.rst @@ -1,4 +1,4 @@ -BSD support +BSD Support =========== .. contents:: Topics @@ -8,80 +8,73 @@ BSD support Working with BSD ```````````````` -As you may have already read, Ansible manages Linux/Unix machines using SSH by default. Ansible handles BSD machines in the same manner. +Ansible manages Linux/Unix machines using SSH by default. BSD machines are no exception, however this document covers some of the differences you may encounter with Ansible when working with BSD variants. -Depending on your control machine, Ansible will try to default to using OpenSSH. This works fine when using SSH keys to authenticate, but when using SSH passwords, Ansible relies on sshpass. Most -versions of sshpass do not deal well with BSD login prompts, so in these cases we recommend changing the transport to paramiko. You can do this in ansible.cfg globaly or set it as -an inventory/group/host variable:: +Typically, Ansible will try to default to using OpenSSH as a connection method. This is suitable when when using SSH keys to authenticate, but when using SSH passwords, Ansible relies on sshpass. 
Most +versions of sshpass do not deal particularly well with BSD login prompts, so when using SSH passwords against BSD machines, it is recommended to change the transport method to paramiko. You can do this in ansible.cfg globally or you can set it as an inventory/group/host variable. For example:: [freebsd] mybsdhost1 ansible_connection=paramiko -Ansible is agentless by default, but it needs some software installed on the target machines, mainly Python 2.4 or higher with an included json library (which is standard in Python 2.5 and above). -Without python you can still use the ``raw`` module to execute commands. This module is very limited, however it can be used to bootstrap Ansible on BSDs. - - +Ansible is agentless by default, however certain software is required on the target machines. Using Python 2.4 on the agents requires an additional py-simplejson package/library to be installed, however this library is already included in Python 2.5 and above. +Operating without Python is possible with the ``raw`` module. Although this module can be used to bootstrap Ansible and install Python on BSD variants (see below), it is very limited and the use of Python is required to make full use of Ansible's features. .. _bootstrap_bsd: Bootstrapping BSD ````````````````` -For Ansible to effectively manage your machine, we need to install Python along with a json library, in this case we are using Python 2.7 which already has json included. +As mentioned above, you can bootstrap Ansible with the ``raw`` module and remotely install Python on targets. The following example installs Python 2.7 which includes the json library required for full functionality of Ansible. On your control machine you can simply execute the following for most versions of FreeBSD:: ansible -m raw -a “pkg install -y python27” mybsdhost1 -Once this is done you can now use other Ansible modules aside from the ``raw`` module. +Once this is done you can now use other Ansible modules apart from the ``raw`` module. .. note:: - This example uses pkg_add, you should be able to substitute for the appropriate tool for your BSD, - also you might need to look up the exact package name you need. - + This example used pkg_add as used on FreeBSD, however you should be able to substitute the appropriate package tool for your BSD; the package name may also differ. Refer to the package list or documentation of the BSD variant you are using for the exact Python package name you intend to install. .. _python_location: -Setting python interpreter -`````````````````````````` +Setting the Python interpreter +`````````````````````````````` -To support the multitude of Unix/Linux OSs and distributions, Ansible cannot rely on the environment or ``env`` to find the correct Python. By default, modules point at ``/usr/bin/python`` as this is the most common location. On the BSDs you cannot rely on this so you should tell ansible where python is located, through the ``ansible_python_interpreter`` inventory variable:: +To support a variety of Unix/Linux operating systems and distributions, Ansible cannot always rely on the existing environment or ``env`` variables to locate the correct Python binary. By default, modules point at ``/usr/bin/python`` as this is the most common location. On BSD variants, this path may differ, so it is advised to inform Ansible of the binary's location, through the ``ansible_python_interpreter`` inventory variable. 
For example:: [freebsd:vars] ansible_python_interpreter=/usr/local/bin/python2.7 -If you use plugins other than those included with Ansible you might need to set similar variables for ``bash``, ``perl`` or ``ruby``, depending on how the plugin was written:: +If you use additional plugins beyond those bundled with Ansible, you can set similar variables for ``bash``, ``perl`` or ``ruby``, depending on how the plugin is written. For example:: [freebsd:vars] ansible_python_interpreter=/usr/local/bin/python ansible_perl_interpreter=/usr/bin/perl5 -What modules are available -`````````````````````````` +Which modules are available? +```````````````````````````` -Most of the core Ansible modules are written for a combination of Linux/Unix machines and arbitrary web services; most should work fine on the BSDs with the exception of those that are aimed at Linux specific technologies (i.e. lvg). +The majority of the core Ansible modules are written for a combination of Linux/Unix machines and other generic services, so most should function well on the BSDs with the obvious exception of those that are aimed at Linux-only technologies (such as LVG). +Using BSD as the control machine +```````````````````````````````` -You can also use a BSD as the control machine -````````````````````````````````````````````` - -It should be as simple as installing the Ansible package or follow the ``pip`` or 'from source' instructions. +Using BSD as the control machine is as simple as installing the Ansible package for your BSD variant or by following the ``pip`` or 'from source' instructions. .. _bsd_facts: BSD Facts ````````` -Ansible gathers facts from the BSDs as it would from Linux machines, but since the data, names and structures can be different for network, disks and other devices, one should expect the output to be different, but still familiar to a BSD administrator. - +Ansible gathers facts from the BSDs in a similar manner to Linux machines, but since the data, names and structures can vary for network, disks and other devices, one should expect the output to be slightly different yet still familiar to a BSD administrator. .. _bsd_contributions: -BSD Contributions -````````````````` +BSD Efforts and Contributions +````````````````````````````` -BSD support is important for Ansible. Even though the majority of our contributors use and target Linux we have an active BSD community and will strive to be as BSD friendly as possible. -Report any issues you see with BSD incompatibilities, even better to submit a pull request with the fix! +BSD support is important to us at Ansible. Even though the majority of our contributors use and target Linux we have an active BSD community and strive to be as BSD friendly as possible. +Please feel free to report any issues or incompatibilities you discover with BSD; pull requests with an included fix are also welcome! .. 
seealso:: From e7a9031d6116ca8249f4bc61ff60f90077981376 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 28 Jan 2016 09:43:29 -0800 Subject: [PATCH 0489/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 25414bbd1b2..93d02189f6d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 25414bbd1b28241a029a9f8875fc6349b090fc2d +Subproject commit 93d02189f6dcfa0578a0fac0fb1f289369ac13a5 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e13942f9c57..36be779888c 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e13942f9c572080f36e76a8c22f7000ffeedac94 +Subproject commit 36be779888cb69f6a2849c72731a9d03e5565d1e From fa9822df0f7a9067b6feae9d8c187f860aae1a29 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 28 Jan 2016 10:50:20 -0800 Subject: [PATCH 0490/1113] Changes to convert to unicode at the borders The module docs and vault changes solve issues where tracebacks can happen. The galaxy changes are mostly refactoring to be more pythonic with a small chance that a unicode traceback could have occurred there without the changes. The change in __init__.py when we actually call the pager makes things more robust but could hide places where we had bytes coming in already so I didn't want to change that without auditing where the text was coming from. Fixes #14178 --- lib/ansible/cli/__init__.py | 2 +- lib/ansible/cli/galaxy.py | 35 ++++++++++++++++++-------------- lib/ansible/cli/vault.py | 8 +++++++- lib/ansible/utils/module_docs.py | 6 +++--- 4 files changed, 31 insertions(+), 20 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 12ba8f89004..a91c9557b03 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -459,7 +459,7 @@ class CLI(object): os.environ['LESS'] = CLI.LESS_OPTS try: cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) - cmd.communicate(input=text.encode(sys.stdout.encoding)) + cmd.communicate(input=to_bytes(text)) except IOError: pass except KeyboardInterrupt: diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 08488154e25..17f06409bb5 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -39,6 +39,7 @@ from ansible.galaxy.role import GalaxyRole from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -161,8 +162,8 @@ class GalaxyCLI(CLI): def _display_role_info(self, role_info): - text = "\nRole: %s \n" % role_info['name'] - text += "\tdescription: %s \n" % role_info.get('description', '') + text = [u"", u"Role: %s" % to_unicode(role_info['name'])] + text.append(u"\tdescription: %s" % role_info.get('description', '')) for k in sorted(role_info.keys()): @@ -171,14 +172,15 @@ class GalaxyCLI(CLI): if isinstance(role_info[k], dict): text += "\t%s: \n" % (k) + text.append(u"\t%s:" % (k)) for key in sorted(role_info[k].keys()): if key in self.SKIP_INFO_KEYS: continue - text += "\t\t%s: %s\n" % (key, role_info[k][key]) + text.append(u"\t\t%s: %s" % (key, role_info[k][key])) else: - text += "\t%s: %s\n" % (k, role_info[k]) + 
text.append(u"\t%s: %s" % (k, role_info[k])) - return text + return u'\n'.join(text) ############################ # execute actions @@ -322,9 +324,11 @@ class GalaxyCLI(CLI): if role_spec: role_info.update(role_spec) - data += self._display_role_info(role_info) + data = self._display_role_info(role_info) + ### FIXME: This is broken in both 1.9 and 2.0 as + # _display_role_info() always returns something if not data: - data += "\n- the role %s was not found" % role + data = u"\n- the role %s was not found" % role self.pager(data) @@ -518,24 +522,25 @@ class GalaxyCLI(CLI): display.display("No roles match your search.", color=C.COLOR_ERROR) return True - data = '' + data = [u''] if response['count'] > page_size: - data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)) else: - data += ("\nFound %d roles matching your search:\n" % response['count']) + data.append(u"Found %d roles matching your search:" % response['count']) max_len = [] for role in response['results']: max_len.append(len(role['username'] + '.' + role['name'])) name_len = max(max_len) - format_str = " %%-%ds %%s\n" % name_len - data +='\n' - data += (format_str % ("Name", "Description")) - data += (format_str % ("----", "-----------")) + format_str = u" %%-%ds %%s" % name_len + data.append(u'') + data.append(format_str % (u"Name", u"Description")) + data.append(format_str % (u"----", u"-----------")) for role in response['results']: - data += (format_str % (role['username'] + '.' + role['name'],role['description'])) + data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])) + data = u'\n'.join(data) self.pager(data) return True diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 9908f17e578..85bdac6993e 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -26,6 +26,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.parsing.dataloader import DataLoader from ansible.parsing.vault import VaultEditor from ansible.cli import CLI +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -157,7 +158,12 @@ class VaultCLI(CLI): def execute_view(self): for f in self.args: - self.pager(self.editor.plaintext(f)) + # Note: vault should return byte strings because it could encrypt + # and decrypt binary files. 
We are responsible for changing it to + # unicode here because we are displaying it and therefore can make + # the decision that the display doesn't have to be precisely what + # the input was (leave that to decrypt instead) + self.pager(to_unicode(self.editor.plaintext(f))) def execute_rekey(self): for f in self.args: diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 14a5d030565..e647da00143 100755 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -24,7 +24,7 @@ __metaclass__ = type import os import sys import ast -import yaml +from ansible.parsing.yaml.loader import AnsibleLoader import traceback from collections import MutableMapping, MutableSet, MutableSequence @@ -71,7 +71,7 @@ def get_docstring(filename, verbose=False): continue if 'DOCUMENTATION' in theid: - doc = yaml.safe_load(child.value.s) + doc = AnsibleLoader(child.value.s, file_name=filename).get_single_data() fragments = doc.get('extends_documentation_fragment', []) if isinstance(fragments, basestring): @@ -91,7 +91,7 @@ def get_docstring(filename, verbose=False): assert fragment_class is not None fragment_yaml = getattr(fragment_class, fragment_var, '{}') - fragment = yaml.safe_load(fragment_yaml) + fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data() if fragment.has_key('notes'): notes = fragment.pop('notes') From 901d349f459b2ca97e483224da9d2097ba4c2204 Mon Sep 17 00:00:00 2001 From: Paul Calabro <pcalabro@paypal.com> Date: Thu, 28 Jan 2016 13:05:10 -0700 Subject: [PATCH 0491/1113] Adding a hyphen in the perms pattern section, since doing something like go=- is a quick way to strip all permissions for non-owners. --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b420f18e6e8..3ba19886b2f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -929,7 +929,7 @@ class AnsibleModule(object): def _symbolic_mode_to_octal(self, path_stat, symbolic_mode): new_mode = stat.S_IMODE(path_stat.st_mode) - mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst]*|[ugo])$') + mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$') for mode in symbolic_mode.split(','): match = mode_re.match(mode) if match: From 4547ac7fb1a39f86e179734036ec158bb4f91c03 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 30 Nov 2015 13:45:14 -0600 Subject: [PATCH 0492/1113] v2 version of slack callback plugin --- lib/ansible/plugins/callback/slack.py | 230 ++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 lib/ansible/plugins/callback/slack.py diff --git a/lib/ansible/plugins/callback/slack.py b/lib/ansible/plugins/callback/slack.py new file mode 100644 index 00000000000..575a1112c6a --- /dev/null +++ b/lib/ansible/plugins/callback/slack.py @@ -0,0 +1,230 @@ +# (C) 2014-2015, Matt Martz <matt@sivel.net> + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json +import os +import uuid + +from __main__ import cli + +from ansible.constants import mk_boolean +from ansible.module_utils.urls import open_url +from ansible.plugins.callback import CallbackBase + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + + +class CallbackModule(CallbackBase): + """This is an ansible callback plugin that sends status + updates to a Slack channel during playbook execution. + + This plugin makes use of the following environment variables: + SLACK_WEBHOOK_URL (required): Slack Webhook URL + SLACK_CHANNEL (optional): Slack room to post in. Default: #ansible + SLACK_USERNAME (optional): Username to post as. Default: ansible + SLACK_INVOCATION (optional): Show command line invocation + details. Default: False + + Requires: + prettytable + + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'slack' + CALLBACK_NEEDS_WHITELIST = True + + def __init__(self, display=None): + + self.disabled = False + + if cli: + self._options = cli.options + else: + self._options = None + + + super(CallbackModule, self).__init__(display=display) + + if not HAS_PRETTYTABLE: + self.disabled = True + self._display.warning('The `prettytable` python module is not ' + 'installed. Disabling the Slack callback ' + 'plugin.') + + self.webhook_url = os.getenv('SLACK_WEBHOOK_URL') + self.channel = os.getenv('SLACK_CHANNEL', '#ansible') + self.username = os.getenv('SLACK_USERNAME', 'ansible') + self.show_invocation = mk_boolean( + os.getenv('SLACK_INVOCATION', self._display.verbosity > 1) + ) + + if self.webhook_url is None: + self.disabled = True + self._display.warning('Slack Webhook URL was not provided. 
The ' + 'Slack Webhook URL can be provided using ' + 'the `SLACK_WEBHOOK_URL` environment ' + 'variable.') + + self.playbook_name = None + + # This is a 6 character identifier provided with each message + # This makes it easier to correlate messages when there are more + # than 1 simultaneous playbooks running + self.guid = uuid.uuid4().hex[:6] + + def send_msg(self, attachments): + payload = { + 'channel': self.channel, + 'username': self.username, + 'attachments': attachments, + 'parse': 'none', + 'icon_url': ('http://cdn2.hubspot.net/hub/330046/' + 'file-449187601-png/ansible_badge.png'), + } + + data = json.dumps(payload) + self._display.debug(data) + self._display.debug(self.webhook_url) + try: + response = open_url(self.webhook_url, data=data) + return response.read() + except Exception as e: + self._display.warning('Could not submit message to Slack: %s' % + str(e)) + + def v2_playbook_on_start(self, playbook): + self.playbook_name = os.path.basename(playbook._file_name) + + title = [ + '*Playbook initiated* (_%s_)' % self.guid + ] + invocation_items = [] + if self._options and self.show_invocation: + tags = self._options.tags + skip_tags = self._options.skip_tags + extra_vars = self._options.extra_vars + subset = self._options.subset + inventory = os.path.basename( + os.path.realpath(self._options.inventory) + ) + + invocation_items.append('Inventory: %s' % inventory) + if tags and tags != 'all': + invocation_items.append('Tags: %s' % tags) + if skip_tags: + invocation_items.append('Skip Tags: %s' % skip_tags) + if subset: + invocation_items.append('Limit: %s' % subset) + if extra_vars: + invocation_items.append('Extra Vars: %s' % + ' '.join(extra_vars)) + + title.append('by *%s*' % self._options.remote_user) + + title.append('\n\n*%s*' % self.playbook_name) + msg_items = [' '.join(title)] + if invocation_items: + msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + + msg = '\n'.join(msg_items) + + attachments = [{ + 'fallback': msg, + 'fields': [ + { + 'value': msg + } + ], + 'color': 'warning', + 'mrkdwn_in': ['text', 'fallback', 'fields'], + }] + + self.send_msg(attachments=attachments) + + def v2_playbook_on_play_start(self, play): + """Display Play start messages""" + + name = play.name or 'Play name not specified (%s)' % play._uuid + msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + attachments = [ + { + 'fallback': msg, + 'text': msg, + 'color': 'warning', + 'mrkdwn_in': ['text', 'fallback', 'fields'], + } + ] + self.send_msg(attachments=attachments) + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics""" + + hosts = sorted(stats.processed.keys()) + + t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', + 'Failures']) + + failures = False + unreachable = False + + for h in hosts: + s = stats.summarize(h) + + if s['failures'] > 0: + failures = True + if s['unreachable'] > 0: + unreachable = True + + t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', + 'failures']]) + + attachments = [] + msg_items = [ + '*Playbook Complete* (_%s_)' % self.guid + ] + if failures or unreachable: + color = 'danger' + msg_items.append('\n*Failed!*') + else: + color = 'good' + msg_items.append('\n*Success!*') + + msg_items.append('```\n%s\n```' % t) + + msg = '\n'.join(msg_items) + + attachments.append({ + 'fallback': msg, + 'fields': [ + { + 'value': msg + } + ], + 'color': color, + 'mrkdwn_in': ['text', 'fallback', 'fields'] + }) + + self.send_msg(attachments=attachments) From 
6d1a834b81aa2e1b30c064d9c7c0c038f9a88265 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 11 Aug 2014 15:51:27 -0500 Subject: [PATCH 0493/1113] Add vault-keyring.py contrib script that can be used with --vault-password-file --- contrib/vault/vault-keyring.py | 77 ++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 contrib/vault/vault-keyring.py diff --git a/contrib/vault/vault-keyring.py b/contrib/vault/vault-keyring.py new file mode 100644 index 00000000000..d294bff0ea5 --- /dev/null +++ b/contrib/vault/vault-keyring.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# (c) 2014, Matt Martz <matt@sivel.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +# +# Script to be used with vault_password_file or --vault-password-file +# to retrieve the vault password via your OSes native keyring application +# +# This script requires the ``keyring`` python module +# +# Add a [vault] section to your ansible.cfg file, +# the only option is 'username'. Example: +# +# [vault] +# username = 'ansible_vault' +# +# Additionally, it would be a good idea to configure vault_password_file in +# ansible.cfg +# +# [defaults] +# ... +# vault_password_file = /path/to/vault-keyring.py +# ... 
+# +# To set your password: python /path/to/vault-keyring.py set +# +# If you choose to not configure the path to vault_password_file in ansible.cfg +# your ansible-playbook command may look like: +# +# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml + +import sys +import getpass +import keyring + +import ansible.constants as C + + +def main(): + parser = C.load_config_file() + try: + username = parser.get('vault', 'username') + except: + sys.stderr.write('No [vault] section configured\n') + sys.exit(1) + + if len(sys.argv) == 2 and sys.argv[1] == 'set': + password = getpass.getpass() + confirm = getpass.getpass('Confirm password: ') + if password == confirm: + keyring.set_password('ansible', username, password) + else: + sys.stderr.write('Passwords do not match\n') + sys.exit(1) + else: + sys.stdout.write(keyring.get_password('ansible', username)) + + sys.exit(0) + + +if __name__ == '__main__': + main() From bf52e541fb85acad99d6d4ea57eced44b2b2bc6a Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 28 Jan 2016 16:20:59 -0600 Subject: [PATCH 0494/1113] Add newline when printing the password --- contrib/vault/vault-keyring.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/vault/vault-keyring.py b/contrib/vault/vault-keyring.py index d294bff0ea5..bc001476c76 100644 --- a/contrib/vault/vault-keyring.py +++ b/contrib/vault/vault-keyring.py @@ -68,7 +68,7 @@ def main(): sys.stderr.write('Passwords do not match\n') sys.exit(1) else: - sys.stdout.write(keyring.get_password('ansible', username)) + sys.stdout.write('%s\n' % keyring.get_password('ansible', username)) sys.exit(0) From 2c825539ffb737051add23dbb9f16a5fef24e469 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 28 Jan 2016 16:02:57 -0800 Subject: [PATCH 0495/1113] When setting up the local connection for the rsync we need to set the shell as well. 
Fixes #13490 --- lib/ansible/plugins/action/synchronize.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index bbd67dffecf..9b267844b81 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -20,6 +20,7 @@ __metaclass__ = type import os.path +from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING from ansible.plugins.action import ActionBase from ansible.plugins import connection_loader from ansible.utils.boolean import boolean @@ -185,7 +186,7 @@ class ActionModule(ActionBase): localhost_ports = set() for host in C.LOCALHOST: localhost_vars = task_vars['hostvars'].get(host, {}) - for port_var in ('ansible_port', 'ansible_ssh_port'): + for port_var in MAGIC_VARIABLE_MAPPING['port']: port = localhost_vars.get(port_var, None) if port: break @@ -228,6 +229,21 @@ class ActionModule(ActionBase): if not use_delegate and remote_transport: # Create a connection to localhost to run rsync on new_stdin = self._connection._new_stdin + + # Unike port, there can be only one shell + localhost_shell = None + for host in C.LOCALHOST: + localhost_vars = task_vars['hostvars'].get(host, {}) + for shell_var in MAGIC_VARIABLE_MAPPING['shell']: + localhost_shell = localhost_vars.get(shell_var, None) + if localhost_shell: + break + if localhost_shell: + break + else: + localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE) + self._play_context.shell = localhost_shell + new_connection = connection_loader.get('local', self._play_context, new_stdin) self._connection = new_connection self._override_module_replaced_vars(task_vars) From db375c22afcac46641fc7368f802c698279f55e6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 28 Jan 2016 19:43:17 -0500 Subject: [PATCH 0496/1113] load now does not modify the incomming data also removed json loader as yaml loader can do both --- lib/ansible/parsing/dataloader.py | 39 +++++++++++++------------------ 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index c54ba78f1fe..b295560c931 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -20,7 +20,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy -import json import os import stat import subprocess @@ -74,30 +73,24 @@ class DataLoader(): a JSON or YAML string. ''' + # YAML parser will take JSON as it is a subset. + if isinstance(data, AnsibleUnicode): + # The PyYAML's libyaml bindings use PyUnicode_CheckExact so + # they are unable to cope with our subclass. + # Unwrap and re-wrap the unicode so we can keep track of line + # numbers + in_data = text_type(data) + else: + in_data = data try: - # we first try to load this data as JSON - return json.loads(data) - except: - # if loading JSON failed for any reason, we go ahead - # and try to parse it as YAML instead + new_data = self._safe_load(in_data, file_name=file_name) + except YAMLError as yaml_exc: + self._handle_error(yaml_exc, file_name, show_content) - if isinstance(data, AnsibleUnicode): - # The PyYAML's libyaml bindings use PyUnicode_CheckExact so - # they are unable to cope with our subclass. 
- # Unwrap and re-wrap the unicode so we can keep track of line - # numbers - new_data = text_type(data) - else: - new_data = data - try: - new_data = self._safe_load(new_data, file_name=file_name) - except YAMLError as yaml_exc: - self._handle_error(yaml_exc, file_name, show_content) - - if isinstance(data, AnsibleUnicode): - new_data = AnsibleUnicode(new_data) - new_data.ansible_pos = data.ansible_pos - return new_data + if isinstance(data, AnsibleUnicode): + new_data = AnsibleUnicode(new_data) + new_data.ansible_pos = data.ansible_pos + return new_data def load_from_file(self, file_name): ''' Loads data from a file, which can contain either JSON or YAML. ''' From dd4787701f45536ac37fe81d46ba0846caf35411 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 29 Jan 2016 10:41:57 -0500 Subject: [PATCH 0497/1113] fix unicode input for template lookup fixes #14207 --- lib/ansible/plugins/lookup/template.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py index 2ca2e5673bf..3f452a11980 100644 --- a/lib/ansible/plugins/lookup/template.py +++ b/lib/ansible/plugins/lookup/template.py @@ -22,6 +22,7 @@ import os from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -46,7 +47,7 @@ class LookupModule(LookupBase): display.vvvv("File lookup using %s as file" % lookupfile) if lookupfile and os.path.exists(lookupfile): with open(lookupfile, 'r') as f: - template_data = f.read() + template_data = to_unicode(f.read()) searchpath = [self._loader._basedir, os.path.dirname(lookupfile)] if 'role_path' in variables: From ceef202024423dd35ecef93f947d661f71c33e75 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 29 Jan 2016 09:59:58 -0800 Subject: [PATCH 0498/1113] Update submodule refs --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 36be779888c..fff5ae6994f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 36be779888cb69f6a2849c72731a9d03e5565d1e +Subproject commit fff5ae6994fbe64d45323bc1d11f6103e211f524 From 5587b08335f223fff64f54ced6f2790b3d6ee6f0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 29 Jan 2016 15:32:35 -0500 Subject: [PATCH 0499/1113] Make sure setup tasks inherit properly from their parent play Fixes #13602 --- lib/ansible/executor/play_iterator.py | 82 ++++++++++++++++----------- 1 file changed, 48 insertions(+), 34 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 09caeec2d98..182643246f9 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -132,8 +132,18 @@ class PlayIterator: def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False): self._play = play - self._blocks = [] + + setup_block = Block(play=self._play) + setup_task = Task(block=setup_block) + setup_task.action = 'setup' + setup_task.args = {} + setup_task.set_loader(self._play._loader) + setup_block.block = [setup_task] + + setup_block = setup_block.filter_tagged_tasks(play_context, all_vars) + self._blocks.append(setup_block) + for block in self._play.compile(): new_block = 
block.filter_tagged_tasks(play_context, all_vars) if new_block.has_tasks(): @@ -188,35 +198,9 @@ class PlayIterator: if s.run_state == self.ITERATING_COMPLETE: display.debug("host %s is done iterating, returning" % host.name) return (None, None) - elif s.run_state == self.ITERATING_SETUP: - s.run_state = self.ITERATING_TASKS - s.pending_setup = True - # Gather facts if the default is 'smart' and we have not yet - # done it for this host; or if 'explicit' and the play sets - # gather_facts to True; or if 'implicit' and the play does - # NOT explicitly set gather_facts to False. - - gathering = C.DEFAULT_GATHERING - implied = self._play.gather_facts is None or boolean(self._play.gather_facts) - - if (gathering == 'implicit' and implied) or \ - (gathering == 'explicit' and boolean(self._play.gather_facts)) or \ - (gathering == 'smart' and implied and not host._gathered_facts): - if not peek: - # mark the host as having gathered facts - host.set_gathered_facts(True) - - task = Task() - task.action = 'setup' - task.args = {} - task.set_loader(self._play._loader) - else: - s.pending_setup = False - - if not task: - old_s = s - (s, task) = self._get_next_task_from_state(s, peek=peek) + old_s = s + (s, task) = self._get_next_task_from_state(s, host=host, peek=peek) def _roles_are_different(ra, rb): if ra != rb: @@ -240,7 +224,7 @@ class PlayIterator: return (s, task) - def _get_next_task_from_state(self, state, peek): + def _get_next_task_from_state(self, state, host, peek): task = None @@ -255,7 +239,37 @@ class PlayIterator: state.run_state = self.ITERATING_COMPLETE return (state, None) - if state.run_state == self.ITERATING_TASKS: + if state.run_state == self.ITERATING_SETUP: + if not state.pending_setup: + state.pending_setup = True + + # Gather facts if the default is 'smart' and we have not yet + # done it for this host; or if 'explicit' and the play sets + # gather_facts to True; or if 'implicit' and the play does + # NOT explicitly set gather_facts to False. 
+ + gathering = C.DEFAULT_GATHERING + implied = self._play.gather_facts is None or boolean(self._play.gather_facts) + + if (gathering == 'implicit' and implied) or \ + (gathering == 'explicit' and boolean(self._play.gather_facts)) or \ + (gathering == 'smart' and implied and not host._gathered_facts): + # mark the host as having gathered facts + host.set_gathered_facts(True) + setup_block = self._blocks[0] + if setup_block.has_tasks() and len(setup_block.block) > 0: + task = setup_block.block[0] + else: + state.pending_setup = False + + state.cur_block += 1 + state.cur_regular_task = 0 + state.cur_rescue_task = 0 + state.cur_always_task = 0 + state.run_state = self.ITERATING_TASKS + state.child_state = None + + elif state.run_state == self.ITERATING_TASKS: # clear the pending setup flag, since we're past that and it didn't fail if state.pending_setup: state.pending_setup = False @@ -272,7 +286,7 @@ class PlayIterator: state.tasks_child_state = HostState(blocks=[task]) state.tasks_child_state.run_state = self.ITERATING_TASKS state.tasks_child_state.cur_role = state.cur_role - (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, peek=peek) + (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek) if task is None: # check to see if the child state was failed, if so we need to # fail here too so we don't continue iterating tasks @@ -298,7 +312,7 @@ class PlayIterator: state.rescue_child_state = HostState(blocks=[task]) state.rescue_child_state.run_state = self.ITERATING_TASKS state.rescue_child_state.cur_role = state.cur_role - (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, peek=peek) + (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek) if task is None: # check to see if the child state was failed, if so we need to # fail here too so we don't continue iterating rescue @@ -328,7 +342,7 @@ class PlayIterator: state.always_child_state = HostState(blocks=[task]) state.always_child_state.run_state = self.ITERATING_TASKS state.always_child_state.cur_role = state.cur_role - (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, peek=peek) + (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek) if task is None: # check to see if the child state was failed, if so we need to # fail here too so we don't continue iterating always From 1aae6499cabf6a1d8b12987cbf1e4d9ed97f4fe0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 29 Jan 2016 02:00:40 -0500 Subject: [PATCH 0500/1113] fixed code for v1 callback runtime compatiblity old exception code obsoleted by exception avoidance errors on callback produce warnings and don't stop play --- lib/ansible/executor/task_queue_manager.py | 58 ++++++++++------------ 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 64b952126fe..feb0ab526f0 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -19,7 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from multiprocessing.managers import SyncManager, DictProxy import multiprocessing import os import tempfile @@ -27,7 +26,6 @@ import tempfile from ansible import constants as C from ansible.errors 
import AnsibleError from ansible.executor.play_iterator import PlayIterator -from ansible.executor.process.worker import WorkerProcess from ansible.executor.process.result import ResultProcess from ansible.executor.stats import AggregateStats from ansible.playbook.play_context import PlayContext @@ -284,35 +282,31 @@ class TaskQueueManager: # see osx_say.py example for such a plugin if getattr(callback_plugin, 'disabled', False): continue - methods = [ - getattr(callback_plugin, method_name, None), - getattr(callback_plugin, 'v2_on_any', None) - ] + + # try to find v2 method, fallback to v1 method, ignore callback if no method found + methods = [] + for possible in [method_name, 'v2_on_any']: + gotit = getattr(callback_plugin, possible, None) + if gotit is None: + gotit = getattr(callback_plugin, possible.replace('v2_',''), None) + if gotit is not None: + methods.append(gotit) + for method in methods: - if method is not None: - try: - # temporary hack, required due to a change in the callback API, so - # we don't break backwards compatibility with callbacks which were - # designed to use the original API - # FIXME: target for removal and revert to the original code here - # after a year (2017-01-14) - if method_name == 'v2_playbook_on_start': - import inspect - (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method) - if 'playbook' in f_args: - method(*args, **kwargs) - else: - method() - else: + try: + # temporary hack, required due to a change in the callback API, so + # we don't break backwards compatibility with callbacks which were + # designed to use the original API + # FIXME: target for removal and revert to the original code here after a year (2017-01-14) + if method_name == 'v2_playbook_on_start': + import inspect + (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method) + if 'playbook' in f_args: method(*args, **kwargs) - except Exception as e: - import traceback - orig_tb = to_unicode(traceback.format_exc()) - try: - v1_method = method.replace('v2_','') - v1_method(*args, **kwargs) - except Exception: - if display.verbosity >= 3: - display.warning(orig_tb, formatted=True) - else: - display.warning('Error when using %s: %s' % (method, str(e))) + else: + method() + else: + method(*args, **kwargs) + except Exception as e: + #TODO: add config toggle to make this fatal or not? + display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e))) From f26673904f3e8618359a845d9cc0956da5157b38 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 29 Jan 2016 17:46:18 -0500 Subject: [PATCH 0501/1113] updated intro adhoc to use become --- docsite/rst/intro_adhoc.rst | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index e9abdccc95b..1d614bda7a0 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -11,12 +11,11 @@ ad hoc tasks. What's an ad-hoc command? An ad-hoc command is something that you might type in to do something really -quick, but don't want to save for later. +quick, but don't want to save for later. This is a good place to start to understand the basics of what Ansible can do prior to learning the playbooks language -- ad-hoc commands can also be used -to do quick things that you might not necessarily want to write a full playbook -for. +to do quick things that you might not necessarily want to write a full playbook for. 
Generally speaking, the true power of Ansible lies in playbooks. Why would you use ad-hoc tasks versus playbooks? @@ -25,7 +24,7 @@ For instance, if you wanted to power off all of your lab for Christmas vacation, you could execute a quick one-liner in Ansible without writing a playbook. For configuration management and deployments, though, you'll want to pick up on -using '/usr/bin/ansible-playbook' -- the concepts you will learn here will +using '/usr/bin/ansible-playbook' -- the concepts you will learn here will port over directly to the playbook language. (See :doc:`playbooks` for more information about those) @@ -60,25 +59,24 @@ behavior, pass in "-u username". If you want to run commands as a different use $ ansible atlanta -a "/usr/bin/foo" -u username -Often you'll not want to just do things from your user account. If you want to run commands through sudo:: +Often you'll not want to just do things from your user account. If you want to run commands through privilege escalation:: - $ ansible atlanta -a "/usr/bin/foo" -u username --sudo [--ask-sudo-pass] + $ ansible atlanta -a "/usr/bin/foo" -u username --become [--ask-become-pass] -Use ``--ask-sudo-pass`` (``-K``) if you are not using passwordless -sudo. This will interactively prompt you for the password to use. -Use of passwordless sudo makes things easier to automate, but it's not -required. +Use ``--ask-become-pass`` (``-K``) if you are not using a passwordless privilege escalation method (sudo/su/pfexec/doas/etc). +This will interactively prompt you for the password to use. +Use of a passwordless setup makes things easier to automate, but it's not required. -It is also possible to sudo to a user other than root using -``--sudo-user`` (``-U``):: +It is also possible to become a user other than root using +``--become-user``:: - $ ansible atlanta -a "/usr/bin/foo" -u username -U otheruser [--ask-sudo-pass] + $ ansible atlanta -a "/usr/bin/foo" -u username --become-user otheruser [--ask-become-pass] .. note:: - - Rarely, some users have security rules where they constrain their sudo environment to running specific command paths only. + + Rarely, some users have security rules where they constrain their sudo/pbrun/doas environment to running specific command paths only. This does not work with ansible's no-bootstrapping philosophy and hundreds of different modules. - If doing this, use Ansible from a special account that does not have this constraint. + If doing this, use Ansible from a special account that does not have this constraint. One way of doing this without sharing access to unauthorized users would be gating Ansible with :doc:`tower`, which can hold on to an SSH credential and let members of certain organizations use it on their behalf without having direct access. 
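For reference alongside the ad-hoc privilege-escalation examples above, the same behaviour can be expressed with playbook keywords instead of CLI flags. This is only an illustrative sketch; the ``atlanta`` group, the ``username``/``otheruser`` accounts and the ``/usr/bin/foo`` path are the placeholder values reused from the examples above::

    # Playbook equivalent of:
    #   ansible atlanta -a "/usr/bin/foo" -u username --become-user otheruser [--ask-become-pass]
    - hosts: atlanta
      remote_user: username     # same as -u username
      become: true              # same as --become
      become_user: otheruser    # same as --become-user otheruser
      become_method: sudo       # sudo is the default; su, pfexec, doas, etc. are also valid
      tasks:
        - name: run foo with escalated privileges
          command: /usr/bin/foo

The escalation password, if one is required, is still supplied at run time, for example ``ansible-playbook site.yml --ask-become-pass`` (where ``site.yml`` stands for whatever playbook holds the play above).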
From eaf34fa2be0949ec186359578de531fe2c6d0ca0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 29 Jan 2016 18:40:06 -0500 Subject: [PATCH 0502/1113] added diff feature to changelog
--- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md index 4037ea7f9f1..4f7384d5849 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md
@@ -3,6 +3,10 @@ Ansible Changes By Release ## 2.1 TBD - ACTIVE DEVELOPMENT +###Major Changes: + +* added facility for modules to send back 'diff' for display when ansible is called with --diff; file, puppet and other modules already implement this + ####New Modules: * aws: ec2_vpc_net_facts * cloudstack: cs_volume
From 7964a35918363c19fee6f38ffb1feb7a51560468 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 29 Jan 2016 18:46:56 -0500 Subject: [PATCH 0503/1113] hardcoding inventory/hosts when pulling from repo
return to pre 2.0 behaviour which did not factor inventory into the repo pull --- lib/ansible/cli/pull.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 2571717766e..e099555f3ec 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py
@@ -160,15 +160,13 @@ class PullCLI(CLI): if not self.options.fullclone: repo_opts += ' depth=1' - path = module_loader.find_plugin(self.options.module_name) if path is None: raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) - cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % ( - bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts - ) + # hardcode local and inventory/host as this is just meant to fetch the repo + cmd = '%s/ansible -i "localhost," -c local %s -m %s -a "%s" all' % (bin_path, base_opts, self.options.module_name, repo_opts) for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev
From fb57818ea3860902750215ab2876c2195a48192a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 29 Jan 2016 16:37:15 -0800 Subject: [PATCH 0504/1113] Explicitly set validate_certs=False as the boto default is True
Fixes #14089 --- lib/ansible/module_utils/ec2.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 2263ad86f45..7b93d9bb7e0 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py
@@ -163,8 +163,7 @@ def get_aws_connection_info(module, boto3=False): boto_params = dict(aws_access_key_id=access_key, aws_secret_access_key=secret_key, aws_session_token=security_token) - if validate_certs: - boto_params['verify'] = validate_certs + boto_params['verify'] = validate_certs if profile_name: boto_params['profile_name'] = profile_name
@@ -181,7 +180,7 @@ def get_aws_connection_info(module, boto3=False): module.fail_json("boto does not support profile_name before 2.24") boto_params['profile_name'] = profile_name - if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): + if HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): boto_params['validate_certs'] = validate_certs for param, value in boto_params.items():
From 9e3932ffcad920adf2c9452e3739822c19bb2c2a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 29 Jan 2016 18:58:01 -0800 Subject: [PATCH 0505/1113] Some attributes of callbacks aren't in v2.
Port plugins to the v2 way to do that Update porting guide with info on callback porting --- docsite/rst/porting_guide_2.0.rst | 28 +++++++++++++++++++- lib/ansible/plugins/callback/context_demo.py | 16 +++++++++++ lib/ansible/plugins/callback/hipchat.py | 5 +++- 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index a26763fc14a..489b74a287e 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -168,7 +168,33 @@ Action plugins Callback plugins ---------------- -* callback plugins +Although Ansible 2.0 provides a new callback API the old one continues to work +for most callback plugins. However, if your callback plugin makes use of +:attr:`self.playbook`, :attr:`self.play`, or :attr:`self.task` then you will +have to store the values for these yourself as ansible no longer automatically +populates the callback with them. Here's a short snippet that shows you how:: + + from ansible.plugins.callback import CallbackBase + + class CallbackModule(CallbackBase): + def __init__(self): + self.playbook = None + self.play = None + self.task = None + + def v2_playbook_on_start(self, playbook): + self.playbook = playbook + + def v2_playbook_on_play_start(self, play): + self.play = play + + def v2_playbook_on_task_start(self, task, is_conditional): + self.task = task + + def v2_on_any(self, *args, **kwargs): + self._display.display('%s: %s: %s' % (self.playbook.name, + self.play.name, self.task)) + Connection plugins ------------------ diff --git a/lib/ansible/plugins/callback/context_demo.py b/lib/ansible/plugins/callback/context_demo.py index ec4454c45a6..f01f8494063 100644 --- a/lib/ansible/plugins/callback/context_demo.py +++ b/lib/ansible/plugins/callback/context_demo.py @@ -31,8 +31,18 @@ class CallbackModule(CallbackBase): CALLBACK_NAME = 'context_demo' CALLBACK_NEEDS_WHITELIST = True + def __init__(self, *args, **kwargs): + self.task = None + self.play = None + def v2_on_any(self, *args, **kwargs): i = 0 + if self.play: + play_str = 'play: %s' % self.play.name + if self.task: + task_str = 'task: %s' % self.task + self._display.display("--- %s %s ---" % (self.play_str, self.task_str)) + self._display.display(" --- ARGS ") for a in args: self._display.display(' %s: %s' % (i, a)) @@ -41,3 +51,9 @@ class CallbackModule(CallbackBase): self._display.display(" --- KWARGS ") for k in kwargs: self._display.display(' %s: %s' % (k, kwargs[k])) + + def v2_playbook_on_play_start(self, play): + self.play = play + + def v2_playbook_on_task_start(self, task, is_conditional): + self.task = task diff --git a/lib/ansible/plugins/callback/hipchat.py b/lib/ansible/plugins/callback/hipchat.py index b31140128b0..602827aeace 100644 --- a/lib/ansible/plugins/callback/hipchat.py +++ b/lib/ansible/plugins/callback/hipchat.py @@ -73,6 +73,7 @@ class CallbackModule(CallbackBase): self.printed_playbook = False self.playbook_name = None + self.play = None def send_msg(self, msg, msg_format='text', color='yellow', notify=False): """Method for sending a message to HipChat""" @@ -93,9 +94,11 @@ class CallbackModule(CallbackBase): self.display.warning('Could not submit message to hipchat') - def playbook_on_play_start(self, name): + def v2_playbook_on_play_start(self, play): """Display Playbook and play start messages""" + self.play = play + name = play.name # This block sends information about a playbook when it starts # The playbook object is not immediately available at # playbook_on_start so we grab it via 
the play From 37eac4592e2bf2814c2325b57042fee26ccdcd8a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 30 Jan 2016 11:35:11 -0500 Subject: [PATCH 0506/1113] added back --profile to keep backwards compat this was broken by #12548 --- contrib/inventory/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 4c5cf23fcb8..700b51a839e 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -407,7 +407,7 @@ class Ec2Inventory(object): help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - parser.add_argument('--boto-profile', action='store', + parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', help='Use boto profile for connections to EC2') self.args = parser.parse_args() From 633b16d11626eb220f634a6166d856b178a77e99 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 31 Jan 2016 20:03:15 -0500 Subject: [PATCH 0507/1113] add exeception handling for invalid commands over nxapi This commit will catch invalid commands being send over nxapi and call fail_json on the module. The nxos shared module will now return the failure --- lib/ansible/module_utils/nxos.py | 35 +++++++++++++++++--------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index 7a523dbae46..c2d49c90025 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -100,8 +100,8 @@ class Nxapi(object): headers = {'Content-Type': 'application/json'} - response, headers = fetch_url(self.module, self.url, data=data, headers=headers, - method='POST') + response, headers = fetch_url(self.module, self.url, data=data, + headers=headers, method='POST') if headers['status'] != 200: self.module.fail_json(**headers) @@ -109,20 +109,22 @@ class Nxapi(object): response = self.module.from_json(response.read()) result = list() - output = response['ins_api']['outputs']['output'] - if isinstance(output, list): - for item in response['ins_api']['outputs']['output']: - if item['code'] != '200': - self.module.fail_json(msg=item['msg'], command=item['input'], - code=item['code']) - else: - result.append(item['body']) - elif output['code'] != '200': - self.module.fail_json(msg=item['msg'], command=item['input'], - code=item['code']) - else: - result.append(output['body']) - + try: + output = response['ins_api']['outputs']['output'] + if isinstance(output, list): + for item in response['ins_api']['outputs']['output']: + if item['code'] != '200': + self.module.fail_json(msg=item['msg'], command=item['input'], + code=item['code']) + else: + result.append(item['body']) + elif output['code'] != '200': + self.module.fail_json(msg=item['msg'], command=item['input'], + code=item['code']) + else: + result.append(output['body']) + except Exception: + self.module.fail_json(**headers) return result @@ -200,6 +202,7 @@ class NetworkModule(AnsibleModule): cmd = 'show running-config' if self.params.get('include_defaults'): cmd += ' all' + if self.params['transport'] == 'cli': return self.execute(cmd)[0] else: From 1733bf4053b1ee299f06c16c9d167bd0a15ecc1a Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 31 Jan 2016 22:58:08 -0500 Subject: [PATCH 0508/1113] minor update 
for nxos This fixes a minor bug in the nxos config module to ensure that both the cli and nxapi transport return the running config as a string and not a list object. --- lib/ansible/module_utils/nxos.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index c2d49c90025..3c8837e0968 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -203,10 +203,8 @@ class NetworkModule(AnsibleModule): if self.params.get('include_defaults'): cmd += ' all' - if self.params['transport'] == 'cli': - return self.execute(cmd)[0] - else: - return self.execute(cmd) + response = self.execute(cmd) + return response[0] def get_module(**kwargs): """Return instance of NetworkModule From b73c7728f46a020b01cd865131fe3f55fa8e121a Mon Sep 17 00:00:00 2001 From: Anthony Robinson <arobinson@qsfllc.com> Date: Mon, 1 Feb 2016 11:05:46 -0500 Subject: [PATCH 0509/1113] Update example vpc_desitnation_variable example with working settings --- contrib/inventory/ec2.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini index 1231c9d4665..25947a88f0f 100644 --- a/contrib/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -35,9 +35,9 @@ destination_variable = public_dns_name # private subnet, this should be set to 'private_ip_address', and Ansible must # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. -# WARNING: - instances that are in the private vpc, _without_ public ip address +# WARNING: - instances that are in the private vpc, _without_ public ip address # will not be listed in the inventory until You set: -# vpc_destination_variable = 'private_ip_address' +# vpc_destination_variable = private_ip_address vpc_destination_variable = ip_address # To tag instances on EC2 with the resource records that point to them from @@ -144,7 +144,7 @@ group_by_elasticache_replication_group = True # You can use wildcards in filter values also. Below will list instances which # tag Name value matches webservers1* -# (ex. webservers15, webservers1a, webservers123 etc) +# (ex. webservers15, webservers1a, webservers123 etc) # instance_filters = tag:Name=webservers1* # A boto configuration profile may be used to separate out credentials From be82caefd257912d0dabfadbb4819d154dc2c469 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 19:27:06 -0500 Subject: [PATCH 0510/1113] initial add of action plugin eos_template Adds a new local action for eos_config module to handle templating configs and backing up running configurations. 
Implements the local action net_config Note this action was refactored from eos_config to eos_template --- lib/ansible/plugins/action/eos_template.py | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 lib/ansible/plugins/action/eos_template.py diff --git a/lib/ansible/plugins/action/eos_template.py b/lib/ansible/plugins/action/eos_template.py new file mode 100644 index 00000000000..cc150d61838 --- /dev/null +++ b/lib/ansible/plugins/action/eos_template.py @@ -0,0 +1,26 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + pass From 54f7a7b6d292fb5198da68bc2ffbfd4bcc46af6e Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 19:30:08 -0500 Subject: [PATCH 0511/1113] initial add of action plugin nxos_template Adds new local action for working with cisco nxos configurations. Implemements the net_config local action. Note this action plugin was refactored from nxos_config to nxos_template --- lib/ansible/plugins/action/nxos_template.py | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 lib/ansible/plugins/action/nxos_template.py diff --git a/lib/ansible/plugins/action/nxos_template.py b/lib/ansible/plugins/action/nxos_template.py new file mode 100644 index 00000000000..2b63234f169 --- /dev/null +++ b/lib/ansible/plugins/action/nxos_template.py @@ -0,0 +1,27 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + pass + From 4f2a75923e1d0303d3fa062da2cadeea047acce8 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 19:28:03 -0500 Subject: [PATCH 0512/1113] initial add of action plugin ios_template Adds a new local action ios_config for working with cisco ios configuration files. Implements the common net_confing local action Note this plugin was refactored from ios_config to ios_template --- lib/ansible/plugins/action/ios_template.py | 28 ++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 lib/ansible/plugins/action/ios_template.py diff --git a/lib/ansible/plugins/action/ios_template.py b/lib/ansible/plugins/action/ios_template.py new file mode 100644 index 00000000000..5334b644d32 --- /dev/null +++ b/lib/ansible/plugins/action/ios_template.py @@ -0,0 +1,28 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + pass + + From b2f1c0691dc8428034caff68b466f7b11cc6e252 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 19:30:46 -0500 Subject: [PATCH 0513/1113] initial add of action plugin ops_template Adds new local action ops_config for handling openswitch configurations using either dc or cli based configurations. Implements the common net_config local action. Note this refactors the ops_config plugin to ops_template --- lib/ansible/plugins/action/ops_template.py | 50 ++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 lib/ansible/plugins/action/ops_template.py diff --git a/lib/ansible/plugins/action/ops_template.py b/lib/ansible/plugins/action/ops_template.py new file mode 100644 index 00000000000..84924fdb742 --- /dev/null +++ b/lib/ansible/plugins/action/ops_template.py @@ -0,0 +1,50 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + + def run(self, tmp=None, task_vars=None): + if self._connection.transport == 'local': + return super(ActionModule, self).run(tmp, task_vars) + + result = dict(changed=False) + + if isinstance(self._task.args['src'], basestring): + self._handle_template() + + self._task.args['config'] = task_vars.get('config') + + result.update(self._execute_module(module_name=self._task.action, + module_args=self._task.args, task_vars=task_vars)) + + if self._task.args.get('backup') and '_config' in result: + contents = json.dumps(result['_config'], indent=4) + self._write_backup(task_vars['inventory_hostname'], contents) + del result['_config'] + + return result + + From ddfab5faf1192c13cf706246e83bc744ab9c1d41 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 1 Feb 2016 12:32:59 -0600 Subject: [PATCH 0514/1113] Use isinstance type checks in bool filter so we can handle unsafe proxy objects. Fixes #14240 --- lib/ansible/plugins/filter/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index fed5097d919..51638d02ae4 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -41,7 +41,7 @@ import uuid import yaml from jinja2.filters import environmentfilter from distutils.version import LooseVersion, StrictVersion -from ansible.compat.six import iteritems +from ansible.compat.six import iteritems, string_types from ansible import errors from ansible.parsing.yaml.dumper import AnsibleDumper @@ -110,7 +110,7 @@ def bool(a): ''' return a bool for the arg ''' if a is None or type(a) == bool: return a - if type(a) in types.StringTypes: + if isinstance(a, string_types): a = a.lower() if a in ['yes', 'on', '1', 'true', 1]: return True From e0ed88e668f806e9dee404ce5c31957bdbac5f4f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 13:42:55 -0500 Subject: [PATCH 0515/1113] normalize error message on src != dir when local now action plugin returns same error as module --- lib/ansible/plugins/action/assemble.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 64fb2f45cce..aae105400fd 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -109,6 +109,11 @@ class ActionModule(ActionBase): if regexp is not None: _re = re.compile(regexp) + if not os.path.isdir(src): + result['failed'] = True + result['msg'] = "Source (%s) is not a directory" % src + return result + # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden) From 6a62ad6c4b4dd8c2ed3402a1c08af13847574bb8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 15:17:23 -0500 Subject: [PATCH 0516/1113] hide internal params once used --- lib/ansible/module_utils/basic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index a4af68ef8b3..27b4cc2f4da 100644 
--- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1150,6 +1150,10 @@ class AnsibleModule(object): elif check_invalid_arguments and k not in self._legal_inputs: self.fail_json(msg="unsupported parameter for module: %s" % k) + #clean up internal params: + if k.startswith('_ansible_'): + del self.params[k] + def _count_terms(self, check): count = 0 for term in check: From 28cf4bc00b263163cc1ed7844f9aef63c9e1893f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 18:54:09 -0500 Subject: [PATCH 0517/1113] fix incorrect environment processing it was assumed it could only be a dict or string (it starts out as a list) also a 2nd assumption that bare vars only would appear in one of the dict keys. removed deprecation warnings from here as they should be signaled in the bare conversion itself. --- lib/ansible/playbook/task.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 6bd3caaca5b..4328602f593 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -244,11 +244,21 @@ class Task(Base, Conditional, Taggable, Become): if value is None: return dict() - for env_item in value: - if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys(): - display.deprecated("Using bare variables for environment is deprecated." - " Update your playbooks so that the environment value uses the full variable syntax ('{{foo}}')") - break + elif isinstance(value, list): + if len(value) == 1: + return templar.template(value[0], convert_bare=True) + else: + env = [] + for env_item in value: + if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys(): + env[env_item] = templar.template(env_item, convert_bare=True) + elif isinstance(value, dict): + env = dict() + for env_item in value: + if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables.keys(): + env[env_item] = templar.template(value[env_item], convert_bare=True) + + # at this point it should be a simple string return templar.template(value, convert_bare=True) def _post_validate_changed_when(self, attr, value, templar): From dc15eb806e1b5c040aedb0565218d26c369eb453 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 18:59:14 -0500 Subject: [PATCH 0518/1113] deprecate all bare variable conversions (not debug) now deprecation message appears with variable name in all spots where this occurs debug's var= option is excluded as this is only place where bare variables shold actually be accepted. 
--- lib/ansible/plugins/action/debug.py | 2 +- lib/ansible/template/__init__.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 2af20eddfc4..f07d8ea5d05 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -46,7 +46,7 @@ class ActionModule(ActionBase): elif 'var' in self._task.args: try: - results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True) + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False) if results == self._task.args['var']: raise AnsibleUndefinedVariable except AnsibleUndefinedVariable: diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 8ce2358eb1e..fdc8eba720e 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -46,6 +46,12 @@ except ImportError: from numbers import Number +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + __all__ = ['Templar'] # A regex for checking to see if a variable we're trying to @@ -269,7 +275,7 @@ class Templar: self._available_variables = variables self._cached_result = {} - def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars = [''], cache = True): + def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars = [''], cache = True, bare_deprecated=True): ''' Templates (possibly recursively) any given data as input. If convert_bare is set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}') @@ -290,7 +296,7 @@ class Templar: try: if convert_bare: - variable = self._convert_bare_variable(variable) + variable = self._convert_bare_variable(variable, bare_deprecated=bare_deprecated) if isinstance(variable, string_types): result = variable @@ -366,7 +372,7 @@ class Templar: ''' return self.environment.block_start_string in data or self.environment.variable_start_string in data - def _convert_bare_variable(self, variable): + def _convert_bare_variable(self, variable, bare_deprecated): ''' Wraps a bare string, which may have an attribute portion (ie. foo.bar) in jinja2 variable braces so that it is evaluated properly. @@ -376,6 +382,9 @@ class Templar: contains_filters = "|" in variable first_part = variable.split("|")[0].split(".")[0].split("[")[0] if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable: + if bare_deprecated: + display.deprecated("Using bare variables is deprecated. 
Update your playbooks so that the environment value uses the full variable syntax ('%s%s%s')" % + (self.environment.variable_start_string, variable, self.environment.variable_end_string)) return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string) # the variable didn't meet the conditions to be converted, From 13c011c8fd32bfbddbc01ea6b1b859dc3cd26610 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 23:48:30 -0500 Subject: [PATCH 0519/1113] added ec2_vol_facts to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f7384d5849..5a4bb8fc87a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ Ansible Changes By Release ####New Modules: * aws: ec2_vpc_net_facts +* aws: ec2_vol_facts * cloudstack: cs_volume ####New Filters: From 9c4a00ad298533f70295d5f755154231748b1e2c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 23:54:51 -0500 Subject: [PATCH 0520/1113] added ec2_dhcp_options module to changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a4bb8fc87a..1ea4893728a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,9 @@ Ansible Changes By Release * added facility for modules to send back 'diff' for display when ansible is called with --diff, file, puppet and other module already implement this ####New Modules: -* aws: ec2_vpc_net_facts * aws: ec2_vol_facts +* aws: ec2_vpc_dhcp_options.py +* aws: ec2_vpc_net_facts * cloudstack: cs_volume ####New Filters: From f628704a7129d38d0b8686c7af7493a934d34575 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 27 Jan 2016 11:09:29 -0500 Subject: [PATCH 0521/1113] added option groups to make --help readable Also moved -K to become to keep short option in view of removing the separate sudo/su prompt options --- lib/ansible/cli/__init__.py | 71 +++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 30 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index a91c9557b03..b9d320cec7a 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -266,52 +266,63 @@ class CLI(object): parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') + if connect_opts: + connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts") + connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true', + help='ask for connection password') + connect_group.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', + help='use this file to authenticate the connection') + connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) + connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, + help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', + help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) + connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args', + help="specify common arguments to pass to sftp/scp/ssh (e.g. 
ProxyCommand)") + connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args', + help="specify extra arguments to pass to sftp only (e.g. -f, -l)") + connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args', + help="specify extra arguments to pass to scp only (e.g. -l)") + connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args', + help="specify extra arguments to pass to ssh only (e.g. -R)") + + parser.add_option_group(connect_group) + + runas_group = None + rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts") if runas_opts: + runas_group = rg # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', + runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") - parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, + runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') - parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', + runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', help='run operations with su (deprecated, use become)') - parser.add_option('-R', '--su-user', default=None, + runas_group.add_option('-R', '--su-user', default=None, help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) # consolidated privilege escalation (become) - parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', - help="run operations with become (nopasswd implied)") - parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string', + runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', + help="run operations with become (does not imply password prompting)") + runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS, help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) - parser.add_option('--become-user', default=None, dest='become_user', type='string', + runas_group.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) if runas_opts or runas_prompt_opts: - parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', + if not runas_group: + runas_group = rg + runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', + runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') - parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', + runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', 
action='store_true', help='ask for privilege escalation password') - if connect_opts: - parser.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true', - help='ask for connection password') - parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', - help='use this file to authenticate the connection') - parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', - help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) - parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, - help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) - parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', - help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) - parser.add_option('--ssh-common-args', default='', dest='ssh_common_args', - help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)") - parser.add_option('--sftp-extra-args', default='', dest='sftp_extra_args', - help="specify extra arguments to pass to sftp only (e.g. -f, -l)") - parser.add_option('--scp-extra-args', default='', dest='scp_extra_args', - help="specify extra arguments to pass to scp only (e.g. -l)") - parser.add_option('--ssh-extra-args', default='', dest='ssh_extra_args', - help="specify extra arguments to pass to ssh only (e.g. -R)") + if runas_group: + parser.add_option_group(runas_group) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', From ed059ffca922a02f419e042d56a03035e475eef1 Mon Sep 17 00:00:00 2001 From: Robin Roth <robin.roth@kit.edu> Date: Tue, 2 Feb 2016 12:13:23 +0100 Subject: [PATCH 0522/1113] add integration test for ansible-pull * this test includes the behavior described in #13688 and #13681 * it runs a minimal playbook and checks for correct use of inventory and limit --- test/integration/Makefile | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 95b9d4320c4..8f2e880305b 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -21,7 +21,7 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) -all: parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log +all: parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 @@ -30,6 +30,29 @@ parsing: includes: ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) +pull: pull_run pull_no_127 pull_limit_inventory + +pull_run: + ansible-pull -d $(MYTMPDIR) -U https://github.com/ansible-test-robinro/pull-integration-test.git | grep MAGICKEYWORD; \ + RC=$$? ; \ + rm -rf $(MYTMPDIR); \ + exit $$RC + +# test for https://github.com/ansible/ansible/issues/13681 +pull_no_127: + ansible-pull -d $(MYTMPDIR) -U https://github.com/ansible-test-robinro/pull-integration-test.git | grep -v 127\.0\.0\.1; \ + RC=$$? 
; \ + rm -rf $(MYTMPDIR); \ + exit $$RC + +# test for https://github.com/ansible/ansible/issues/13688 +pull_limit_inventory: + ansible-pull -d $(MYTMPDIR) -U https://github.com/ansible-test-robinro/pull-integration-test.git; \ + RC=$$? ; \ + rm -rf $(MYTMPDIR); \ + exit $$RC + + unicode: ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) -e 'extra_var=café' # Test the start-at-task flag #9571 From 6105c8c77c7a1873165a4b04307219df100bd970 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 2 Feb 2016 09:40:49 -0500 Subject: [PATCH 0523/1113] corrected name of inventory_hostnames lookup plugin --- docsite/rst/playbooks_loops.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 6f14922deaf..9bccfa6b64a 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -536,11 +536,11 @@ There is also a specific lookup plugin ``inventory_hostname`` that can be used l # show all the hosts in the inventory - debug: msg={{ item }} - with_inventory_hostname: all + with_inventory_hostnames: all # show all the hosts matching the pattern, ie all but the group www - debug: msg={{ item }} - with_inventory_hostname: all:!www + with_inventory_hostnames: all:!www More information on the patterns can be found on :doc:`intro_patterns` From fa13aa8c007ac7bb3534626d3312e05fee2c7190 Mon Sep 17 00:00:00 2001 From: Kamil Szczygiel <kamil.szczygiel@intel.com> Date: Tue, 2 Feb 2016 15:47:56 +0100 Subject: [PATCH 0524/1113] rename param from skip_ssl to validate_certs --- lib/ansible/module_utils/vmware.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index ca0440a26c3..6bba123f26d 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -105,7 +105,7 @@ def vmware_argument_spec(): hostname=dict(type='str', required=True), username=dict(type='str', aliases=['user', 'admin'], required=True), password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), - skip_ssl=dict(type='bool', required=False, default=False), + validate_certs=dict(type='bool', required=False, default=True), ) @@ -114,15 +114,15 @@ def connect_to_api(module, disconnect_atexit=True): hostname = module.params['hostname'] username = module.params['username'] password = module.params['password'] - skip_ssl = module.params['skip_ssl'] + validate_certs = module.params['validate_certs'] try: - if skip_ssl: + if validate_certs: + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) + else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_NONE service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) - else: - service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) # Disabling atexit should be used in special cases only. # Such as IP change of the ESXi host which removes the connection anyway. 
From b2c0abe9988126861a4e3186dbf1a695d2bdb827 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 2 Feb 2016 10:22:08 -0500 Subject: [PATCH 0525/1113] Don't mark host as having gathered facts when peeking at next task Fixes #14243 --- lib/ansible/executor/play_iterator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 182643246f9..e46a8d15077 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -255,10 +255,11 @@ class PlayIterator: (gathering == 'explicit' and boolean(self._play.gather_facts)) or \ (gathering == 'smart' and implied and not host._gathered_facts): # mark the host as having gathered facts - host.set_gathered_facts(True) setup_block = self._blocks[0] if setup_block.has_tasks() and len(setup_block.block) > 0: task = setup_block.block[0] + if not peek: + host.set_gathered_facts(True) else: state.pending_setup = False From 0e551e6d46c2049a366d45a6c101009a4b2d56fa Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 2 Feb 2016 11:00:31 -0500 Subject: [PATCH 0526/1113] clarified ansible_shell_type description --- docsite/rst/intro_inventory.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 3f504376c29..6d9b5977fc1 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -248,7 +248,9 @@ Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for furthe Remote host environment parameters:: ansible_shell_type - The shell type of the target system. Commands are formatted using 'sh'-style syntax by default. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. + The shell type of the target system. You should not use this setting unless you have set the 'executable' to a non sh compatible shell. + By default commands are formatted using 'sh'-style syntax. + Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. ansible_python_interpreter The target host python path. This is useful for systems with more than one Python or not located at "/usr/bin/python" such as \*BSD, or where /usr/bin/python From 96e22970ee2e0afa9a99377fa533b5fc028d2379 Mon Sep 17 00:00:00 2001 From: Greg Taylor <snagglepants@gmail.com> Date: Tue, 2 Feb 2016 09:58:08 -0800 Subject: [PATCH 0527/1113] Fix a note tag in the API docs. --- docsite/rst/developing_api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 96a447c05c1..c6369f77b99 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -20,7 +20,7 @@ This chapter discusses the Python API. The Python API is very powerful, and is how the all the ansible CLI tools are implemented. In version 2.0 the core ansible got rewritten and the API was mostly rewritten. -:.. note:: Ansible relies on forking processes, as such the API is not thread safe. +.. note:: Ansible relies on forking processes, as such the API is not thread safe. .. 
_python_api_20: From 197bed6fd8be7626e480fff32fbb41375ebc1b55 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 2 Feb 2016 13:13:02 -0500 Subject: [PATCH 0528/1113] make executable setting connection dependant winrm shoudl not use executable, rest should? fixes #14233 --- lib/ansible/plugins/action/__init__.py | 2 +- lib/ansible/plugins/connection/__init__.py | 1 + lib/ansible/plugins/connection/winrm.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 62a2e7806f0..d201850d640 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -505,7 +505,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): replacement strategy (python3 could use surrogateescape) ''' - if executable is not None: + if executable is not None and self._connection.allow_executable: cmd = executable + ' -c ' + pipes.quote(cmd) display.debug("_low_level_execute_command(): starting") diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index bea8e5b426b..f101c07d9f9 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -60,6 +60,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): # as discovered by the specified file extension. An empty string as the # language means any language. module_implementation_preferences = ('',) + allow_executable = True def __init__(self, play_context, new_stdin, *args, **kwargs): # All these hasattrs allow subclasses to override these parameters diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index c291e21782c..5d731c31dd8 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -63,6 +63,7 @@ class Connection(ConnectionBase): module_implementation_preferences = ('.ps1', '') become_methods = [] + allow_executable = False def __init__(self, *args, **kwargs): From 8d4bc2003f4b157bb40b61cdcc115388e645d5a7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 1 Feb 2016 16:55:40 -0500 Subject: [PATCH 0529/1113] better fix for ansible-pull inventory handling now it mirrors what it did in previous versions and properly uses inventory and limit options --- lib/ansible/cli/__init__.py | 2 +- lib/ansible/cli/pull.py | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index b9d320cec7a..ed4a2dd5db8 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -224,7 +224,7 @@ class CLI(object): if inventory_opts: parser.add_option('-i', '--inventory-file', dest='inventory', - help="specify inventory host path (default=%s) or comma separated host list" % C.DEFAULT_HOST_LIST, + help="specify inventory host path (default=%s) or comma separated host list." % C.DEFAULT_HOST_LIST, default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index e099555f3ec..f9a131be433 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -92,6 +92,9 @@ class PullCLI(CLI): help='verify GPG signature of checked out commit, if it fails abort running the playbook.' 
' This needs the corresponding VCS module to support such an operation') + # for pull we don't wan't a default + self.parser.set_defaults(inventory=None) + self.options, self.args = self.parser.parse_args(self.args[1:]) if not self.options.dest: @@ -136,8 +139,8 @@ class PullCLI(CLI): base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) # Attempt to use the inventory passed in as an argument - # It might not yet have been downloaded so use localhost if note - if not self.options.inventory or not os.path.exists(self.options.inventory): + # It might not yet have been downloaded so use localhost as default + if not self.options.inventory or ( ',' not in self.options.inventory and not os.path.exists(self.options.inventory)): inv_opts = 'localhost,' else: inv_opts = self.options.inventory @@ -166,7 +169,7 @@ class PullCLI(CLI): bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) # hardcode local and inventory/host as this is just meant to fetch the repo - cmd = '%s/ansible -i "localhost," -c local %s -m %s -a "%s" all' % (bin_path, base_opts, self.options.module_name, repo_opts) + cmd = '%s/ansible -i "%s" %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts) for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev @@ -208,6 +211,8 @@ class PullCLI(CLI): cmd += ' -t "%s"' % self.options.tags if self.options.subset: cmd += ' -l "%s"' % self.options.subset + else: + cmd += ' -l "%s"' % limit_opts os.chdir(self.options.dest) From 5b1d8cfd5c401ee29f09bcf7924e5e6f7b46f8f7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 2 Feb 2016 11:46:09 -0800 Subject: [PATCH 0530/1113] Establish sh as the default shell plugin. This is a fix for one of the problems pointed out in #14176 --- lib/ansible/plugins/connection/__init__.py | 7 ++++++- lib/ansible/plugins/shell/csh.py | 5 +++++ lib/ansible/plugins/shell/fish.py | 5 +++++ lib/ansible/plugins/shell/powershell.py | 7 +++++++ lib/ansible/plugins/shell/sh.py | 7 +++++++ 5 files changed, 30 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index bea8e5b426b..2dfb2229107 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -83,7 +83,12 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): elif hasattr(self, '_shell_type'): shell_type = getattr(self, '_shell_type') else: - shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) + shell_type = 'sh' + shell_filename = os.path.basename(C.DEFAULT_EXECUTABLE) + for shell in shell_loader.all(): + if shell_filename in shell.COMPATIBLE_SHELLS: + shell_type = shell.SHELL_FAMILY + break self._shell = shell_loader.get(shell_type) if not self._shell: diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py index bd210f12feb..6f1008be012 100644 --- a/lib/ansible/plugins/shell/csh.py +++ b/lib/ansible/plugins/shell/csh.py @@ -21,6 +21,11 @@ from ansible.plugins.shell.sh import ShellModule as ShModule class ShellModule(ShModule): + # Common shell filenames that this plugin handles + COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh')) + # Family of shells this has. 
Must match the filename without extension + SHELL_FAMILY = 'csh' + # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\\\n' _SHELL_REDIRECT_ALLNULL = '>& /dev/null' diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py index 342de99e5f8..aee4cf0867b 100644 --- a/lib/ansible/plugins/shell/fish.py +++ b/lib/ansible/plugins/shell/fish.py @@ -21,6 +21,11 @@ from ansible.plugins.shell.sh import ShellModule as ShModule class ShellModule(ShModule): + # Common shell filenames that this plugin handles + COMPATIBLE_SHELLS = frozenset(('fish',)) + # Family of shells this has. Must match the filename without extension + SHELL_FAMILY = 'fish' + _SHELL_AND = '; and' _SHELL_OR = '; or' _SHELL_SUB_LEFT = '(' diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py index 096a0cf95d6..acde565e2fc 100644 --- a/lib/ansible/plugins/shell/powershell.py +++ b/lib/ansible/plugins/shell/powershell.py @@ -36,6 +36,13 @@ if _powershell_version: class ShellModule(object): + # Common shell filenames that this plugin handles + # Powershell is handled differently. It's selected when winrm is the + # connection + COMPATIBLE_SHELLS = frozenset() + # Family of shells this has. Must match the filename without extension + SHELL_FAMILY = 'powershell' + def env_prefix(self, **kwargs): return '' diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 8b20338a603..6fbb7439cc5 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -30,6 +30,13 @@ _USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') class ShellModule(object): + # Common shell filenames that this plugin handles. + # Note: sh is the default shell plugin so this plugin may also be selected + # if the filename is not listed in any Shell plugin. + COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh')) + # Family of shells this has. Must match the filename without extension + SHELL_FAMILY = 'sh' + # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\n' _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1' From 42e312d3bd0516ceaf2b4533ac643bd9e05163cd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 2 Feb 2016 12:38:21 -0800 Subject: [PATCH 0531/1113] Change $() into backticks. Backticks should work for both POSIX and non-POSIX compatible shells. 
Fixes #14176 --- lib/ansible/plugins/shell/sh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 6fbb7439cc5..1e69665c0f7 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -42,8 +42,8 @@ class ShellModule(object): _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1' _SHELL_AND = '&&' _SHELL_OR = '||' - _SHELL_SUB_LEFT = '"$(' - _SHELL_SUB_RIGHT = ')"' + _SHELL_SUB_LEFT = '"`' + _SHELL_SUB_RIGHT = '`"' _SHELL_GROUP_LEFT = '(' _SHELL_GROUP_RIGHT = ')' From 5cd3f71792676c9605101c699fb2688777fc5a5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 2 Feb 2016 13:00:24 -0800 Subject: [PATCH 0532/1113] Handle utf-8 in module short desc --- hacking/module_formatter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 4c94ca3f2c4..43b76ae71be 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -38,6 +38,7 @@ from six import iteritems from ansible.utils import module_docs from ansible.utils.vars import merge_hash +from ansible.utils.unicode import to_bytes from ansible.errors import AnsibleError ##################################################################################### @@ -343,7 +344,7 @@ def print_modules(module, category_file, deprecated, core, options, env, templat result = process_module(modname, options, env, template, outputname, module_map, aliases) if result != "SKIPPED": - category_file.write(" %s - %s <%s_module>\n" % (modstring, rst_ify(result), module)) + category_file.write(" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(result)), to_bytes(module))) def process_category(category, categories, options, env, template, outputname): From 84f8241ff88eb0e2ab7355811f8e846da5d35031 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 2 Feb 2016 16:30:58 -0500 Subject: [PATCH 0533/1113] tag the setup task with always fixes #14228 --- lib/ansible/executor/play_iterator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index e46a8d15077..8c6fc294939 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -137,6 +137,7 @@ class PlayIterator: setup_block = Block(play=self._play) setup_task = Task(block=setup_block) setup_task.action = 'setup' + setup_task.tags = ['always'] setup_task.args = {} setup_task.set_loader(self._play._loader) setup_block.block = [setup_task] From 4bda4a13e514e9407581a585aa8cc1c7159407d3 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Tue, 19 Jan 2016 19:25:52 -0500 Subject: [PATCH 0534/1113] initial add of action plugin net_template The net_config local action handles templating for network configuration file. 
It will also allow network device configurations to be backed up to the control host Note: this plugin was originally named net_config but has been refactored to net_template --- lib/ansible/plugins/action/net_template.py | 93 ++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 lib/ansible/plugins/action/net_template.py diff --git a/lib/ansible/plugins/action/net_template.py b/lib/ansible/plugins/action/net_template.py new file mode 100644 index 00000000000..76d7a3c6f03 --- /dev/null +++ b/lib/ansible/plugins/action/net_template.py @@ -0,0 +1,93 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +import os +import time +import glob +import urlparse + +from ansible.plugins.action import ActionBase +from ansible.utils.boolean import boolean +from ansible.utils.unicode import to_unicode + +BOOLEANS = ('true', 'false', 'yes', 'no') + +class ActionModule(ActionBase): + + TRANSFERS_FILES = False + + def run(self, tmp=None, task_vars=None): + result = super(ActionModule, self).run(tmp, task_vars) + result['changed'] = False + + try: + self._handle_template() + except ValueError as exc: + return dict(failed=True, msg=exc.message) + + result.update(self._execute_module(module_name=self._task.action, + module_args=self._task.args, task_vars=task_vars)) + + if self._task.args.get('backup'): + self._write_backup(task_vars['inventory_hostname'], result['_backup']) + + if '_backup' in result: + del result['_backup'] + + return result + + def _write_backup(self, host, contents): + if not os.path.exists('backup'): + os.mkdir('backup') + for fn in glob.glob('backup/%s*' % host): + os.remove(fn) + tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) + filename = 'backup/%s_config.%s' % (host, tstamp) + open(filename, 'w').write(contents) + + def _handle_template(self): + src = self._task.args.get('src') + + if os.path.isabs(src) or urlparse.urlsplit('src').scheme: + source = src + + elif self._task._role is not None: + source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', src) + if not source: + source = self._loader.path_dwim_relative(self._task._role._role_path, src) + else: + source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', src) + if not source: + source = self._loader.path_dwim_relative(self._loader.get_basedir(), src) + + if not os.path.exists(source): + return + + try: + with open(source, 'r') as f: + template_data = to_unicode(f.read()) + except IOError: + return dict(failed=True, msg='unable to load src file') + + self._task.args['src'] = self._templar.template(template_data) + + From 67594e8ec203c029f31fc7c06238d571d9764e3a Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Wed, 27 Jan 2016 14:04:07 +0100 
Subject: [PATCH 0535/1113] Fix handling of difflist containing multiple before/after pairs Commit ansible/ansible@c337293 introduced a difflist feature. The return value was not adequately outdented to append any diff after the first. --- lib/ansible/plugins/callback/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index bb24577d648..3a1e5f8caee 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -106,7 +106,6 @@ class CallbackBase: try: with warnings.catch_warnings(): warnings.simplefilter('ignore') - ret = [] if 'dst_binary' in diff: ret.append("diff skipped: destination file appears to be binary\n") if 'src_binary' in diff: @@ -133,9 +132,9 @@ class CallbackBase: ret.append('\n') if 'prepared' in diff: ret.append(to_unicode(diff['prepared'])) - return u"".join(ret) except UnicodeDecodeError: ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n") + return u"".join(ret) def _get_item(self, result): if result.get('_ansible_no_log', False): From 28169492f9edaafdcc62e0d69d9b975cf56064bb Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Wed, 3 Feb 2016 16:14:22 +0100 Subject: [PATCH 0536/1113] Do not insert newlines when diff is empty --- lib/ansible/plugins/callback/__init__.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 3a1e5f8caee..1fa6c03753b 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -127,14 +127,22 @@ class CallbackBase: after_header = "after: %s" % diff['after_header'] else: after_header = 'after' - differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10) - ret.extend(list(differ)) - ret.append('\n') + differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), + to_unicode(diff['after']).splitlines(True), + fromfile=before_header, + tofile=after_header, + fromfiledate='', + tofiledate='', + n=10) + difflines = list(differ) + if difflines: + ret.extend(difflines) + ret.append('\n') if 'prepared' in diff: ret.append(to_unicode(diff['prepared'])) except UnicodeDecodeError: ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n") - return u"".join(ret) + return u''.join(ret) def _get_item(self, result): if result.get('_ansible_no_log', False): From c902f926ca24b9cfd5e89391c2ffd90b8f995ccb Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Wed, 3 Feb 2016 16:18:02 +0100 Subject: [PATCH 0537/1113] Do not print empty diffs with _display.display() --- lib/ansible/plugins/callback/default.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index dfad6579343..df32112cb39 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -138,9 +138,13 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: for res in result._result['results']: if 'diff' in res and res['diff']: - self._display.display(self._get_diff(res['diff'])) + diff = self._get_diff(res['diff']) + if diff: + self._display.display(diff) elif 'diff' in result._result 
and result._result['diff']: - self._display.display(self._get_diff(result._result['diff'])) + diff = self._get_diff(result._result['diff']) + if diff: + self._display.display(diff) def v2_playbook_item_on_ok(self, result): From dc6f0c12900d532aa78ca876fcd1f1269f02c2ff Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 3 Feb 2016 10:55:09 -0500 Subject: [PATCH 0538/1113] avoid logging color codes --- lib/ansible/utils/display.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index cd91af1a4f2..296a7b81806 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -111,6 +111,7 @@ class Display: # FIXME: this needs to be implemented #msg = utils.sanitize_output(msg) + nocolor = msg if color: msg = stringc(msg, color) @@ -135,7 +136,7 @@ class Display: sys.stderr.flush() if logger and not screen_only: - msg2 = msg.lstrip(u'\n') + msg2 = nocolor.lstrip(u'\n') msg2 = to_bytes(msg2) if sys.version_info >= (3,): From dee5dba82aa7ae9fb8da9d00df90110f1027e1aa Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Wed, 3 Feb 2016 01:52:17 +0100 Subject: [PATCH 0539/1113] Handle when the lookup-plugin is used in jinja-style This fixes #14190. --- lib/ansible/plugins/lookup/first_found.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py index 915363180d2..8145c72b020 100644 --- a/lib/ansible/plugins/lookup/first_found.py +++ b/lib/ansible/plugins/lookup/first_found.py @@ -171,7 +171,7 @@ class LookupModule(LookupBase): else: total_search.append(term) else: - total_search = terms + total_search = self._flatten(terms) roledir = variables.get('roledir') for fn in total_search: From dc48d27dd2c49e95a719d7f7596f0b68e0ddef7f Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Wed, 3 Feb 2016 19:11:55 +0100 Subject: [PATCH 0540/1113] Defined JSON booleans in global context for python eval() We define 'false' and 'true' as variables so that python eval() recognizes them as False and True. This fixes #14291. --- lib/ansible/template/safe_eval.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py index ffb48611e27..174724c7bab 100644 --- a/lib/ansible/template/safe_eval.py +++ b/lib/ansible/template/safe_eval.py @@ -41,6 +41,13 @@ def safe_eval(expr, locals={}, include_exceptions=False): http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe ''' + # define certain JSON types + # eg. JSON booleans are unknown to python eval() + JSON_TYPES = { + 'false': False, + 'true': True, + } + # this is the whitelist of AST nodes we are going to # allow in the evaluation. 
Any node type other than # those listed here will raise an exception in our custom @@ -116,7 +123,7 @@ def safe_eval(expr, locals={}, include_exceptions=False): parsed_tree = ast.parse(expr, mode='eval') cnv.visit(parsed_tree) compiled = compile(parsed_tree, expr, 'eval') - result = eval(compiled, {}, dict(locals)) + result = eval(compiled, JSON_TYPES, dict(locals)) if include_exceptions: return (result, None) From c433289a8b3d12950a3c42e5336ce62f127b5daf Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 3 Feb 2016 14:33:25 -0500 Subject: [PATCH 0541/1113] clarified how block applies directives to tasks previous text could be interpreted as the block itself doing the evaluation and skipping --- docsite/rst/playbooks_blocks.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_blocks.rst b/docsite/rst/playbooks_blocks.rst index 7dce94b6d72..d0564345ef2 100644 --- a/docsite/rst/playbooks_blocks.rst +++ b/docsite/rst/playbooks_blocks.rst @@ -27,8 +27,9 @@ to the tasks. become_user: root -In the example above the 3 tasks will be executed only when the block's when condition is met and enables -privilege escalation for all the enclosed tasks. +In the example above the each of the 3 tasks will be executed after appending the `when` condition from the block +and evaluating it in the task's context. Also they inherit the privilege escalation directives enabling "become to root" +for all the enclosed tasks. .. _block_error_handling: From da9ae14ff6ca488a5cca1d68171eb0ad5c747c8d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 Feb 2016 14:52:29 -0500 Subject: [PATCH 0542/1113] Adding new feature - meta: clear_host_errors Allows for the clearing of any failed/unreachable hosts --- lib/ansible/plugins/strategy/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 27e7b2f9741..cb34e4035a4 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -656,5 +656,10 @@ class StrategyBase: self._inventory.refresh_inventory() #elif meta_action == 'reset_connection': # connection_info.connection.close() + elif meta_action == 'clear_host_errors': + self._tqm._failed_hosts = dict() + self._tqm._unreachable_hosts = dict() + for host in iterator._host_states: + iterator._host_states[host].fail_state = iterator.FAILED_NONE else: raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) From f3b691ccafc6c8cec24e670f7b40c58527222063 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 Feb 2016 14:53:16 -0500 Subject: [PATCH 0543/1113] Adding .retry files to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 01a802805e8..a684ea05535 100644 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,7 @@ deb-build *.swo credentials.yml # test output +*.retry .coverage .tox results.xml From f4bd3be71fb17360a91d7e1497776999d0baff78 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 Feb 2016 14:53:47 -0500 Subject: [PATCH 0544/1113] Adding block integration tests --- test/integration/Makefile | 9 +- test/integration/block_test.out | 63 +++++++++++ test/integration/test_blocks/block_test.out | 57 ++++++++++ test/integration/test_blocks/fail.yml | 1 + test/integration/test_blocks/main.yml | 101 ++++++++++++++++++ test/integration/test_blocks/nested_fail.yml | 3 + 
.../test_blocks/nested_nested_fail.yml | 3 + 7 files changed, 236 insertions(+), 1 deletion(-) create mode 100644 test/integration/block_test.out create mode 100644 test/integration/test_blocks/block_test.out create mode 100644 test/integration/test_blocks/fail.yml create mode 100644 test/integration/test_blocks/main.yml create mode 100644 test/integration/test_blocks/nested_fail.yml create mode 100644 test/integration/test_blocks/nested_nested_fail.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index 8f2e880305b..28f7224eb92 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -21,7 +21,7 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) -all: parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log +all: parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log parsing: ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 @@ -127,6 +127,13 @@ test_tags: # Skip one tag [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] +blocks: + # remove old output log + rm -f block_test.out + # run test + ansible-playbook -vv test_blocks/main.yml | tee block_test.out + # ensure each test ran + [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" == "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] cloud: amazon rackspace azure diff --git a/test/integration/block_test.out b/test/integration/block_test.out new file mode 100644 index 00000000000..7ba056ae58d --- /dev/null +++ b/test/integration/block_test.out @@ -0,0 +1,63 @@ +Using /etc/ansible/ansible.cfg as config file +2 plays in test_blocks/main.yml + +PLAY [simple block test] ******************************************************* + +TASK [setup] ******************************************************************* +ok: [localhost] + +TASK [set block tasks run flag] ************************************************ +ok: [localhost] => {"ansible_facts": {"block_tasks_run": true}, "changed": false} + +TASK [fail in tasks] *********************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [set block rescue run flag] *********************************************** +ok: [localhost] => {"ansible_facts": {"block_rescue_run": true}, "changed": false} + +TASK [fail in rescue] ********************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [set block always run flag] *********************************************** +ok: [localhost] => {"ansible_facts": {"block_always_run": true}, "changed": false} + +TASK [set nested block always run flag] **************************************** +ok: [localhost] => {"ansible_facts": {"nested_block_always_run": true}, "changed": false} + +TASK [fail in always] ********************************************************** +fatal: [localhost]: FAILED! 
=> {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [assert] ****************************************************************** +ok: [localhost] => {"changed": false, "msg": "all assertions passed"} + +TASK [debug] ******************************************************************* +ok: [localhost] => { + "msg": "TEST COMPLETE" +} + +PLAY [block with includes] ***************************************************** + +TASK [setup] ******************************************************************* +ok: [localhost] + +TASK [include fail.yml in tasks] *********************************************** +included: /data/devel/ansible/test/integration/test_blocks/fail.yml for localhost + +TASK [fail] ******************************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "failed from tasks"} + +TASK [set_fact] **************************************************************** +ok: [localhost] => {"ansible_facts": {"rescue_run_after_include_fail": true}, "changed": false} + +TASK [include fail.yml in rescue] ********************************************** +included: /data/devel/ansible/test/integration/test_blocks/fail.yml for localhost + +TASK [fail] ******************************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "failed from rescue"} + +TASK [set_fact] **************************************************************** +ok: [localhost] => {"ansible_facts": {"always_run_after_include_fail_in_rescue": true}, "changed": false} + +PLAY RECAP ********************************************************************* +localhost : ok=12 changed=0 unreachable=0 failed=3 + diff --git a/test/integration/test_blocks/block_test.out b/test/integration/test_blocks/block_test.out new file mode 100644 index 00000000000..7c17d0094bf --- /dev/null +++ b/test/integration/test_blocks/block_test.out @@ -0,0 +1,57 @@ +Using /etc/ansible/ansible.cfg as config file +2 plays in test.yml + +PLAY [simple block test] ******************************************************* + +TASK [setup] ******************************************************************* +ok: [localhost] + +TASK [set block tasks run flag] ************************************************ +ok: [localhost] => {"ansible_facts": {"block_tasks_run": true}, "changed": false} + +TASK [fail in tasks] *********************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [set block rescue run flag] *********************************************** +ok: [localhost] => {"ansible_facts": {"block_rescue_run": true}, "changed": false} + +TASK [fail in rescue] ********************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [set block always run flag] *********************************************** +ok: [localhost] => {"ansible_facts": {"block_always_run": true}, "changed": false} + +TASK [set nested block always run flag] **************************************** +ok: [localhost] => {"ansible_facts": {"nested_block_always_run": true}, "changed": false} + +TASK [fail in always] ********************************************************** +fatal: [localhost]: FAILED! 
=> {"changed": false, "failed": true, "msg": "Failed as requested from task"} + +TASK [assert] ****************************************************************** +ok: [localhost] => {"changed": false, "msg": "all assertions passed"} + +TASK [debug] ******************************************************************* +ok: [localhost] => { + "msg": "TEST COMPLETE" +} + +PLAY [block with includes] ***************************************************** + +TASK [setup] ******************************************************************* +ok: [localhost] + +TASK [include fail.yml in tasks] *********************************************** +included: /root/testing/test_blocks/fail.yml for localhost + +TASK [fail] ******************************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "failed from tasks"} + +TASK [set_fact] **************************************************************** +ok: [localhost] => {"ansible_facts": {"rescue_run_after_include_fail": true}, "changed": false} + +TASK [set_fact] **************************************************************** +ok: [localhost] => {"ansible_facts": {"always_run_after_include_fail_in_rescue": true}, "changed": false} + +PLAY RECAP ********************************************************************* +localhost : ok=11 changed=0 unreachable=0 failed=2 + diff --git a/test/integration/test_blocks/fail.yml b/test/integration/test_blocks/fail.yml new file mode 100644 index 00000000000..ca8575a907e --- /dev/null +++ b/test/integration/test_blocks/fail.yml @@ -0,0 +1 @@ +- fail: msg="{{msg}}" diff --git a/test/integration/test_blocks/main.yml b/test/integration/test_blocks/main.yml new file mode 100644 index 00000000000..1e0641ba00a --- /dev/null +++ b/test/integration/test_blocks/main.yml @@ -0,0 +1,101 @@ +- name: simple block test + hosts: localhost + gather_facts: yes + vars: + block_tasks_run: false + block_rescue_run: false + block_always_run: false + nested_block_always_run: false + tasks_run_after_failure: false + rescue_run_after_failure: false + always_run_after_failure: false + tasks: + - block: + - name: set block tasks run flag + set_fact: + block_tasks_run: true + - name: fail in tasks + fail: + - name: tasks flag should not be set after failure + set_fact: + tasks_run_after_failure: true + rescue: + - name: set block rescue run flag + set_fact: + block_rescue_run: true + - name: fail in rescue + fail: + - name: tasks flag should not be set after failure in rescue + set_fact: + rescue_run_after_failure: true + always: + - name: set block always run flag + set_fact: + block_always_run: true + - block: + - meta: noop + always: + - name: set nested block always run flag + set_fact: + nested_block_always_run: true + - name: fail in always + fail: + - name: tasks flag should not be set after failure in always + set_fact: + always_run_after_failure: true + - meta: clear_host_errors + + post_tasks: + - assert: + that: + - block_tasks_run + - block_rescue_run + - block_always_run + - nested_block_always_run + - not tasks_run_after_failure + - not rescue_run_after_failure + - not always_run_after_failure + - debug: msg="TEST COMPLETE" + +- name: block with includes + hosts: localhost + gather_facts: yes + vars: + rescue_run_after_include_fail: false + always_run_after_include_fail_in_rescue: false + tasks_run_after_failure: false + rescue_run_after_failure: false + always_run_after_failure: false + tasks: + - block: + - name: include fail.yml in tasks + include: fail.yml + args: + 
msg: "failed from tasks" + - name: tasks flag should not be set after failure + set_fact: + tasks_run_after_failure: true + rescue: + - set_fact: + rescue_run_after_include_fail: true + - name: include fail.yml in rescue + include: fail.yml + args: + msg: "failed from rescue" + - name: tasks flag should not be set after failure in rescue + set_fact: + rescue_run_after_failure: true + always: + - set_fact: + always_run_after_include_fail_in_rescue: true + - meta: clear_host_errors + + post_tasks: + - assert: + that: + - rescue_run_after_include_fail + - always_run_after_include_fail_in_rescue + - not tasks_run_after_failure + - not rescue_run_after_failure + - not always_run_after_failure + - debug: msg="TEST COMPLETE" diff --git a/test/integration/test_blocks/nested_fail.yml b/test/integration/test_blocks/nested_fail.yml new file mode 100644 index 00000000000..31ae870e373 --- /dev/null +++ b/test/integration/test_blocks/nested_fail.yml @@ -0,0 +1,3 @@ +- include: fail.yml + args: + msg: "nested {{msg}}" diff --git a/test/integration/test_blocks/nested_nested_fail.yml b/test/integration/test_blocks/nested_nested_fail.yml new file mode 100644 index 00000000000..e9a050fb986 --- /dev/null +++ b/test/integration/test_blocks/nested_nested_fail.yml @@ -0,0 +1,3 @@ +- include: nested_fail.yml + args: + msg: "nested {{msg}}" From 5466510edb902466790426686e8abc3e0979ea62 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 Feb 2016 14:59:11 -0500 Subject: [PATCH 0545/1113] Removing .out file for integration tests and adding to .gitignore --- .gitignore | 1 + test/integration/block_test.out | 63 --------------------------------- 2 files changed, 1 insertion(+), 63 deletions(-) delete mode 100644 test/integration/block_test.out diff --git a/.gitignore b/.gitignore index a684ea05535..2392614453b 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ deb-build credentials.yml # test output *.retry +*.out .coverage .tox results.xml diff --git a/test/integration/block_test.out b/test/integration/block_test.out deleted file mode 100644 index 7ba056ae58d..00000000000 --- a/test/integration/block_test.out +++ /dev/null @@ -1,63 +0,0 @@ -Using /etc/ansible/ansible.cfg as config file -2 plays in test_blocks/main.yml - -PLAY [simple block test] ******************************************************* - -TASK [setup] ******************************************************************* -ok: [localhost] - -TASK [set block tasks run flag] ************************************************ -ok: [localhost] => {"ansible_facts": {"block_tasks_run": true}, "changed": false} - -TASK [fail in tasks] *********************************************************** -fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} - -TASK [set block rescue run flag] *********************************************** -ok: [localhost] => {"ansible_facts": {"block_rescue_run": true}, "changed": false} - -TASK [fail in rescue] ********************************************************** -fatal: [localhost]: FAILED! 
=> {"changed": false, "failed": true, "msg": "Failed as requested from task"} - -TASK [set block always run flag] *********************************************** -ok: [localhost] => {"ansible_facts": {"block_always_run": true}, "changed": false} - -TASK [set nested block always run flag] **************************************** -ok: [localhost] => {"ansible_facts": {"nested_block_always_run": true}, "changed": false} - -TASK [fail in always] ********************************************************** -fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Failed as requested from task"} - -TASK [assert] ****************************************************************** -ok: [localhost] => {"changed": false, "msg": "all assertions passed"} - -TASK [debug] ******************************************************************* -ok: [localhost] => { - "msg": "TEST COMPLETE" -} - -PLAY [block with includes] ***************************************************** - -TASK [setup] ******************************************************************* -ok: [localhost] - -TASK [include fail.yml in tasks] *********************************************** -included: /data/devel/ansible/test/integration/test_blocks/fail.yml for localhost - -TASK [fail] ******************************************************************** -fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "failed from tasks"} - -TASK [set_fact] **************************************************************** -ok: [localhost] => {"ansible_facts": {"rescue_run_after_include_fail": true}, "changed": false} - -TASK [include fail.yml in rescue] ********************************************** -included: /data/devel/ansible/test/integration/test_blocks/fail.yml for localhost - -TASK [fail] ******************************************************************** -fatal: [localhost]: FAILED! 
=> {"changed": false, "failed": true, "msg": "failed from rescue"} - -TASK [set_fact] **************************************************************** -ok: [localhost] => {"ansible_facts": {"always_run_after_include_fail_in_rescue": true}, "changed": false} - -PLAY RECAP ********************************************************************* -localhost : ok=12 changed=0 unreachable=0 failed=3 - From 22382726fa69f40c74611a79b99845df1bd3076f Mon Sep 17 00:00:00 2001 From: Andre keedy <andre.keedy@emc.com> Date: Wed, 3 Feb 2016 15:42:05 -0500 Subject: [PATCH 0546/1113] Add rackHd inventory script - Allow ansible to get hosts inventory from rackHD by node id --- contrib/inventory/rackhd.py | 73 +++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100755 contrib/inventory/rackhd.py diff --git a/contrib/inventory/rackhd.py b/contrib/inventory/rackhd.py new file mode 100755 index 00000000000..f27fd5905eb --- /dev/null +++ b/contrib/inventory/rackhd.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +import json +import requests +import os +import argparse +import types + +MONORAIL_URL = 'http://localhost:8080' + +class OnRackInventory(object): + def __init__(self, nodeids): + self._inventory = {} + for nodeid in nodeids: + self._load_inventory_data(nodeid) + output = '{\n' + for nodeid,info in self._inventory.iteritems(): + output += self._format_output(nodeid, info) + output += ',\n' + output = output[:-2] + output += '}\n' + print output + + def _load_inventory_data(self, nodeid): + info = {} + info['ohai'] = MONORAIL_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid ) + info['lookup'] = MONORAIL_URL + '/api/common/lookups/?q={0}'.format(nodeid) + + results = {} + for key,url in info.iteritems(): + r = requests.get( url, verify=False) + results[key] = r.text + + self._inventory[nodeid] = results + + def _format_output(self, nodeid, info): + output = '' + try: + node_info = json.loads(info['lookup']) + ipaddress = '' + if len(node_info) > 0: + ipaddress = node_info[0]["ipAddress"] + output += ' "' + nodeid + '" : {\n' + output += ' "hosts": [ "' + ipaddress + '" ],\n' + output += ' "vars" : {\n' + for key,result in info.iteritems(): + output += ' "' + key + '": ' + json.dumps(json.loads(result), sort_keys=True, indent=2) + ',\n' + output += ' "ansible_ssh_user": "renasar"\n' + output += ' }\n' + output += ' }\n' + except KeyError: + pass + return output + +try: + #check if monorail url(ie:10.1.1.45:8080) is specified in the environment + MONORAIL_URL = 'http://' + str(os.environ['MONORAIL']) +except: + #use default values + pass + +# Use the nodeid specified in the environment to limit the data returned +# or return data for all available nodes +nodeids = [] +try: + nodeids += os.environ['nodeid'].split(',') +except KeyError: + url = MONORAIL_URL + '/api/common/nodes' + r = requests.get( url, verify=False) + data = json.loads(r.text) + for entry in data: + if entry['type'] == 'compute': + nodeids.append(entry['id']) +OnRackInventory(nodeids) From 083530d8faf00c84b49c3a5cff15d5d9706d4076 Mon Sep 17 00:00:00 2001 From: Andre keedy <andre.keedy@emc.com> Date: Wed, 3 Feb 2016 17:00:58 -0500 Subject: [PATCH 0547/1113] Fix erros --- contrib/inventory/rackhd.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/contrib/inventory/rackhd.py b/contrib/inventory/rackhd.py index f27fd5905eb..df74eb332b0 100755 --- a/contrib/inventory/rackhd.py +++ b/contrib/inventory/rackhd.py @@ -5,9 +5,9 @@ import os import argparse import types -MONORAIL_URL = 
'http://localhost:8080' +RACKHD_URL = 'http://localhost:8080' -class OnRackInventory(object): +class RackhdInventory(object): def __init__(self, nodeids): self._inventory = {} for nodeid in nodeids: @@ -18,12 +18,12 @@ class OnRackInventory(object): output += ',\n' output = output[:-2] output += '}\n' - print output + print (output) def _load_inventory_data(self, nodeid): info = {} - info['ohai'] = MONORAIL_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid ) - info['lookup'] = MONORAIL_URL + '/api/common/lookups/?q={0}'.format(nodeid) + info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid ) + info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid) results = {} for key,url in info.iteritems(): @@ -52,8 +52,8 @@ class OnRackInventory(object): return output try: - #check if monorail url(ie:10.1.1.45:8080) is specified in the environment - MONORAIL_URL = 'http://' + str(os.environ['MONORAIL']) + #check if rackhd url(ie:10.1.1.45:8080) is specified in the environment + RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) except: #use default values pass @@ -64,10 +64,10 @@ nodeids = [] try: nodeids += os.environ['nodeid'].split(',') except KeyError: - url = MONORAIL_URL + '/api/common/nodes' + url = RACKHD_URL + '/api/common/nodes' r = requests.get( url, verify=False) data = json.loads(r.text) for entry in data: if entry['type'] == 'compute': nodeids.append(entry['id']) -OnRackInventory(nodeids) +RackhdInventory(nodeids) From 8039ceee0f38f5e6232960b89b396bb07042731d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 Feb 2016 18:42:06 -0500 Subject: [PATCH 0548/1113] Tweak block integration tests to also test the free strategy --- test/integration/Makefile | 8 ++++++-- test/integration/test_blocks/main.yml | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 28f7224eb92..be1e153d178 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -130,9 +130,13 @@ test_tags: blocks: # remove old output log rm -f block_test.out - # run test + # run the test and check to make sure the right number of completions was logged ansible-playbook -vv test_blocks/main.yml | tee block_test.out - # ensure each test ran + [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" == "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] + # cleanup the output log again, to make sure the test is clean + rm -f block_test.out + # run test with free strategy and again count the completions + ansible-playbook -vv test_blocks/main.yml -e test_strategy=free | tee block_test.out [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" == "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] cloud: amazon rackspace azure diff --git a/test/integration/test_blocks/main.yml b/test/integration/test_blocks/main.yml index 1e0641ba00a..cb6fc66600e 100644 --- a/test/integration/test_blocks/main.yml +++ b/test/integration/test_blocks/main.yml @@ -1,6 +1,7 @@ - name: simple block test hosts: localhost gather_facts: yes + strategy: "{{test_strategy|default('linear')}}" vars: block_tasks_run: false block_rescue_run: false @@ -60,6 +61,7 @@ - name: block with includes hosts: localhost gather_facts: yes + strategy: "{{test_strategy|default('linear')}}" vars: rescue_run_after_include_fail: false always_run_after_include_fail_in_rescue: false From 699a854bf30784cd7514f4c1b6f8bd742868f7eb Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 3 
Feb 2016 18:42:27 -0500 Subject: [PATCH 0549/1113] Fixing bugs in play iteration and strategies * Fixed a bug in PlayIterator when ITERATING_ALWAYS, where the block was advanced but the incorrect data structure elements were cleared * Cleaned up the logic of is_failed() in PlayIterator * Fixed a bug in the free strategy which had not been updated to use the base strategy _execute_meta() method * Stopped strategies from using is_failed() to determine if tasks should still be fetched for a host Fixes #14040 --- lib/ansible/executor/play_iterator.py | 21 +++++++++++---------- lib/ansible/plugins/strategy/free.py | 17 ++++------------- lib/ansible/plugins/strategy/linear.py | 3 +-- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index e46a8d15077..ec85ce2f33a 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -275,7 +275,7 @@ class PlayIterator: if state.pending_setup: state.pending_setup = False - if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS: + if self._check_failed_state(state): state.run_state = self.ITERATING_RESCUE elif state.cur_regular_task >= len(block.block): state.run_state = self.ITERATING_ALWAYS @@ -335,7 +335,9 @@ class PlayIterator: state.cur_rescue_task = 0 state.cur_always_task = 0 state.run_state = self.ITERATING_TASKS - state.child_state = None + state.tasks_child_state = None + state.rescue_child_state = None + state.always_child_state = None else: task = block.always[state.cur_always_task] if isinstance(task, Block) or state.always_child_state is not None: @@ -365,7 +367,7 @@ class PlayIterator: return (state, task) def _set_failed_state(self, state): - if state.pending_setup: + if state.run_state == self.ITERATING_SETUP: state.fail_state |= self.FAILED_SETUP state.run_state = self.ITERATING_COMPLETE elif state.run_state == self.ITERATING_TASKS: @@ -407,19 +409,18 @@ class PlayIterator: def _check_failed_state(self, state): if state is None: return False + elif state.fail_state != self.FAILED_NONE: + if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0 or \ + state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0: + return False + else: + return True elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state): return True elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state): return True elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state): return True - elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE: - if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0: - return False - elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0: - return False - else: - return True return False def is_failed(self, host): diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index da123ce3b73..17516c91bc9 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -78,7 +78,7 @@ class StrategyModule(StrategyBase): (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) - if not iterator.is_failed(host) and host_name not in self._tqm._unreachable_hosts and task: + 
if host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done @@ -106,18 +106,7 @@ class StrategyModule(StrategyBase): continue if task.action == 'meta': - # meta tasks store their args in the _raw_params field of args, - # since they do not use k=v pairs, so get that - meta_action = task.args.get('_raw_params') - if meta_action == 'noop': - continue - elif meta_action == 'flush_handlers': - # FIXME: in the 'free' mode, flushing handlers should result in - # only those handlers notified for the host doing the flush - self.run_handlers(iterator, play_context) - else: - raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) - + self._execute_meta(task, play_context, iterator) self._blocked_hosts[host_name] = False else: # handle step if needed, skip meta actions as they are used internally @@ -126,6 +115,8 @@ class StrategyModule(StrategyBase): display.warning("Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host") self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) self._queue_task(host, task, task_vars, play_context) + else: + display.debug("%s is blocked, skipping for now" % host_name) # move on to the next host and make sure we # haven't gone past the end of our hosts list diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 804cfadc776..00c8e157491 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -54,8 +54,7 @@ class StrategyModule(StrategyBase): host_tasks = {} display.debug("building list of next tasks for hosts") for host in hosts: - if not iterator.is_failed(host): - host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) + host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) display.debug("done building task lists") num_setups = 0 From 2e171610e06ebcb326ef69ffa89ae46ea1081418 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 4 Feb 2016 14:19:46 +0100 Subject: [PATCH 0550/1113] Also add 'null' as a possible JSON value --- lib/ansible/template/safe_eval.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py index 174724c7bab..d82373a6fb9 100644 --- a/lib/ansible/template/safe_eval.py +++ b/lib/ansible/template/safe_eval.py @@ -45,6 +45,7 @@ def safe_eval(expr, locals={}, include_exceptions=False): # eg. 
JSON booleans are unknown to python eval() JSON_TYPES = { 'false': False, + 'null': None, 'true': True, } From 8bc2d3be9c41348123af9ad391dd649df5664568 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 4 Feb 2016 09:58:50 -0600 Subject: [PATCH 0551/1113] Add new 'unsafe' YAML constructor --- lib/ansible/parsing/yaml/constructor.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py index bac0d3f2a5a..164d23b497e 100644 --- a/lib/ansible/parsing/yaml/constructor.py +++ b/lib/ansible/parsing/yaml/constructor.py @@ -22,6 +22,7 @@ __metaclass__ = type from yaml.constructor import Constructor, ConstructorError from yaml.nodes import MappingNode from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode +from ansible.vars.unsafe_proxy import wrap_var try: from __main__ import display @@ -72,7 +73,7 @@ class AnsibleConstructor(Constructor): return mapping - def construct_yaml_str(self, node): + def construct_yaml_str(self, node, unsafe=False): # Override the default string handling function # to always return unicode objects value = self.construct_scalar(node) @@ -80,6 +81,9 @@ class AnsibleConstructor(Constructor): ret.ansible_pos = self._node_position_info(node) + if unsafe: + ret = wrap_var(ret) + return ret def construct_yaml_seq(self, node): @@ -88,6 +92,9 @@ class AnsibleConstructor(Constructor): data.extend(self.construct_sequence(node)) data.ansible_pos = self._node_position_info(node) + def construct_yaml_unsafe(self, node): + return self.construct_yaml_str(node, unsafe=True) + def _node_position_info(self, node): # the line number where the previous token has ended (plus empty lines) # Add one so that the first line is line 1 rather than line 0 @@ -121,3 +128,7 @@ AnsibleConstructor.add_constructor( AnsibleConstructor.add_constructor( u'tag:yaml.org,2002:seq', AnsibleConstructor.construct_yaml_seq) + +AnsibleConstructor.add_constructor( + u'!unsafe', + AnsibleConstructor.construct_yaml_unsafe) From 5cd4afaa1dcb0bbfebae20712b9e2f5f23582e89 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 4 Feb 2016 11:10:30 -0600 Subject: [PATCH 0552/1113] Add documentation about the unsafe YAML tag --- docsite/rst/playbooks_advanced_syntax.rst | 59 +++++++++++++++++++++++ docsite/rst/playbooks_special_topics.rst | 1 + docsite/rst/playbooks_variables.rst | 5 ++ 3 files changed, 65 insertions(+) create mode 100644 docsite/rst/playbooks_advanced_syntax.rst diff --git a/docsite/rst/playbooks_advanced_syntax.rst b/docsite/rst/playbooks_advanced_syntax.rst new file mode 100644 index 00000000000..932cfc87b96 --- /dev/null +++ b/docsite/rst/playbooks_advanced_syntax.rst @@ -0,0 +1,59 @@ +Advanced Syntax +=============== + +.. contents:: Topics + +This page describes advanced YAML syntax that enables you to have more control over the data placed in YAML files used by Ansible. + +.. _yaml_tags_and_python_types + +YAML tags and Python types +`````````````````````````` + +The documentation covered here is an extension of the documentation that can be found in the `PyYAML Documentation <http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_ + +.. _unsafe_strings + +Unsafe or Raw Strings +~~~~~~~~~~~~~~~~~~~~~ + +As of Ansible 2.0, there is an internal data type for declaring variable values as "unsafe". 
This means that the data held within the variables value should be treated as unsafe preventing unsafe character subsitition and information disclosure. + +Jinja2 contains functionality for escaping, or telling Jinja2 to not template data by means of functionality such as ``{% raw %} ... {% endraw %}``, however this uses a more comprehensive implementation to ensure that the value is never templated. + +Using YAML tags, you can also mark a value as "unsafe" by using the ``!unsafe`` tag such as:: + + --- + my_unsafe_variable: !unsafe 'this variable has {{ characters that shouldn't be treated as a jinja2 template' + +In a playbook, this may look like:: + + --- + hosts: all + vars: + my_unsafe_variable: !unsafe 'unsafe value' + tasks: + ... + +For complex variables such as hashes or arrays, ``!unsafe`` should be used on the individual elements such as:: + + --- + my_unsafe_array: + - !unsafe 'unsafe element' + - 'safe element' + + my_unsafe_hash: + unsafe_key: !unsafe 'unsafe value' + + + +.. seealso:: + + :doc:`playbooks_variables` + All about variables + `User Mailing List <http://groups.google.com/group/ansible-project>`_ + Have a question? Stop by the google group! + `irc.freenode.net <http://irc.freenode.net>`_ + #ansible IRC chat channel + + diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst index 74974cad108..943f2674eb0 100644 --- a/docsite/rst/playbooks_special_topics.rst +++ b/docsite/rst/playbooks_special_topics.rst @@ -14,6 +14,7 @@ and adopt these only if they seem relevant or useful to your environment. playbooks_delegation playbooks_environment playbooks_error_handling + playbooks_advanced_syntax playbooks_lookups playbooks_prompts playbooks_tags diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 122c0ef9232..9a9b7c64517 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -932,6 +932,11 @@ how all of these things can work together. .. _ansible-examples: https://github.com/ansible/ansible-examples .. _builtin filters: http://jinja.pocoo.org/docs/templates/#builtin-filters +Advanced Syntax +``````````````` + +For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see `playbooks_advanced_syntax`_ + .. seealso:: :doc:`playbooks` From 70ac47ae61eb3c0cb2e33a342128ef1a7653b17b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 3 Feb 2016 21:15:48 -0500 Subject: [PATCH 0553/1113] allow atomic_move to not be atomic ... just 'cause people build bad systems that insist on not allowing updates in an atomic manner and force us to do them in a very unsafe way that has race conditions and can lead to many issues. if using this option you should really be opening a bug report with the system that only allows for this type of update. 
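For illustration only, a hypothetical module that plumbs the new keyword through to this call (the ``unsafe_writes`` option and the module itself are illustrative, not something this patch adds) might end up doing roughly::

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(argument_spec=dict(
        src=dict(required=True),
        dest=dict(required=True),
        unsafe_writes=dict(type='bool', default=False),
    ))

    # Falls back to a plain, non-atomic copy only when os.rename() fails with
    # EBUSY and the caller explicitly opted in via unsafe_writes=True.
    module.atomic_move(module.params['src'], module.params['dest'],
                       unsafe_writes=module.params['unsafe_writes'])
    module.exit_json(changed=True)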
and now i shower though i doubt i'll feel clean --- lib/ansible/module_utils/basic.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 27b4cc2f4da..c1d061eba36 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1668,7 +1668,7 @@ class AnsibleModule(object): e = get_exception() sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e)) - def atomic_move(self, src, dest): + def atomic_move(self, src, dest, unsafe_writes=False): '''atomically move src to dest, copying attributes from dest, returns true on success it uses os.rename to ensure this as it is an atomic operation, rest of the function is to work around limitations, corner cases and ensure selinux context is saved if possible''' @@ -1708,9 +1708,25 @@ class AnsibleModule(object): os.rename(src, dest) except (IOError, OSError): e = get_exception() - # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) - # and 26 (text file busy) which happens on vagrant synced folders - if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: + # sadly there are some situations where we cannot ensure atomicity, but only if + # the user insists and we get the appropriate error we update the file unsafely + if unsafe_writes and e.errno == errno.EBUSY: + #TODO: issue warning that this is an unsafe operation, but doing it cause user insists + try: + try: + out_dest = open(dest, 'wb') + in_src = open(src, 'rb') + shutil.copyfileobj(in_src, out_dest) + finally: # assuring closed files in 2.4 compatible way + if out_dest: + out_dest.close() + if in_src: + in_src.close() + except (shutil.Error, OSError, IOError), e: + self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e)) + elif e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: + # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) + # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) dest_dir = os.path.dirname(dest) From 89c57666c3dd751076aae4cc7200ebc15907d4d4 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 13:14:56 -0500 Subject: [PATCH 0554/1113] fixed py3 compatibility --- lib/ansible/module_utils/basic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c1d061eba36..70439a6c06c 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1722,8 +1722,9 @@ class AnsibleModule(object): out_dest.close() if in_src: in_src.close() - except (shutil.Error, OSError, IOError), e: - self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e)) + except (shutil.Error, OSError, IOError): + e = get_exception() + self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e)) elif e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems From 4b7b3794c9a3876080633eae2166a741a3330b27 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi 
<toshio@fedoraproject.org> Date: Thu, 4 Feb 2016 11:10:50 -0800 Subject: [PATCH 0555/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 93d02189f6d..e1ec52e365a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 93d02189f6dcfa0578a0fac0fb1f289369ac13a5 +Subproject commit e1ec52e365a8fbe95c83db5da3046730c4dc39b2 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index fff5ae6994f..14a62fb5d67 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit fff5ae6994fbe64d45323bc1d11f6103e211f524 +Subproject commit 14a62fb5d6771871654aedb4a36e17cf358785dc From 0766219f587fd622e7d49821be976249b58baa6f Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Thu, 4 Feb 2016 13:01:00 -0500 Subject: [PATCH 0556/1113] When in context of a role, create backup dir within role_path --- lib/ansible/plugins/action/net_template.py | 25 ++++++++++++---------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/lib/ansible/plugins/action/net_template.py b/lib/ansible/plugins/action/net_template.py index 76d7a3c6f03..ffd09324a38 100644 --- a/lib/ansible/plugins/action/net_template.py +++ b/lib/ansible/plugins/action/net_template.py @@ -55,29 +55,32 @@ class ActionModule(ActionBase): return result + def _get_working_path(self): + cwd = self._loader.get_basedir() + if self._task._role is not None: + cwd = self._task._role._role_path + return cwd + def _write_backup(self, host, contents): - if not os.path.exists('backup'): - os.mkdir('backup') - for fn in glob.glob('backup/%s*' % host): + backup_path = self._get_working_path() + '/backup' + if not os.path.exists(backup_path): + os.mkdir(backup_path) + for fn in glob.glob('%s/%s*' % (backup_path, host)): os.remove(fn) tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) - filename = 'backup/%s_config.%s' % (host, tstamp) + filename = '%s/%s_config.%s' % (backup_path, host, tstamp) open(filename, 'w').write(contents) def _handle_template(self): src = self._task.args.get('src') + working_path = self._get_working_path() if os.path.isabs(src) or urlparse.urlsplit('src').scheme: source = src - - elif self._task._role is not None: - source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', src) - if not source: - source = self._loader.path_dwim_relative(self._task._role._role_path, src) else: - source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', src) + source = self._loader.path_dwim_relative(working_path, 'templates', src) if not source: - source = self._loader.path_dwim_relative(self._loader.get_basedir(), src) + source = self._loader.path_dwim_relative(working_path, src) if not os.path.exists(source): return From 4f61fb6cce251d40d65115e2af74b41bc21f479c Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Thu, 4 Feb 2016 14:44:22 -0500 Subject: [PATCH 0557/1113] Add safety check on _backup key. 
--- lib/ansible/plugins/action/net_template.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/net_template.py b/lib/ansible/plugins/action/net_template.py index ffd09324a38..c626c8dc6bc 100644 --- a/lib/ansible/plugins/action/net_template.py +++ b/lib/ansible/plugins/action/net_template.py @@ -47,7 +47,9 @@ class ActionModule(ActionBase): result.update(self._execute_module(module_name=self._task.action, module_args=self._task.args, task_vars=task_vars)) - if self._task.args.get('backup'): + if self._task.args.get('backup') and result.get('_backup'): + # User requested backup and no error occurred in module. + # NOTE: If there is a parameter error, _backup key may not be in results. self._write_backup(task_vars['inventory_hostname'], result['_backup']) if '_backup' in result: From e2a7ba35dbc2b9af5ad1858a64a7bb460e109a0b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 4 Feb 2016 12:44:45 -0800 Subject: [PATCH 0558/1113] Fix --diff to respect no_log task parameter. --- lib/ansible/plugins/action/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index d201850d640..fe8bfc398bf 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -615,4 +615,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): diff['after_header'] = 'dynamically generated' diff['after'] = source + if self._play_context.no_log and 'after' in diff: + diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]" + return diff From 11522b22c3327457d5af8ef2eaa5781c46785c35 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Thu, 4 Feb 2016 22:54:03 +0100 Subject: [PATCH 0559/1113] module_utils/basic: add generic method for checking for missing params when argspec can not be used. --- lib/ansible/module_utils/basic.py | 57 +++++++++++++++++++------------ 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 1366bfceb40..cf5fd2e66a5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -3,27 +3,27 @@ # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. -# +# # Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013 # All rights reserved. # -# Redistribution and use in source and binary forms, with or without modification, +# Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # -# * Redistributions of source code must retain the above copyright +# * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. 
# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # @@ -186,7 +186,7 @@ except ImportError: try: from ast import literal_eval as _literal_eval except ImportError: - # a replacement for literal_eval that works with python 2.4. from: + # a replacement for literal_eval that works with python 2.4. from: # https://mail.python.org/pipermail/python-list/2009-September/551880.html # which is essentially a cut/paste from an earlier (2.6) version of python's # ast.py @@ -944,14 +944,14 @@ class AnsibleModule(object): else: raise ValueError("bad symbolic permission for mode: %s" % mode) return new_mode - + def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode): if operator == '=': if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX - - # mask out u, g, or o permissions from current_mode and apply new permissions + + # mask out u, g, or o permissions from current_mode and apply new permissions inverse_mask = mask ^ PERM_BITS new_mode = (current_mode & inverse_mask) | mode_to_apply elif operator == '+': @@ -959,10 +959,10 @@ class AnsibleModule(object): elif operator == '-': new_mode = current_mode - (current_mode & mode_to_apply) return new_mode - + def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms): prev_mode = stat.S_IMODE(path_stat.st_mode) - + is_directory = stat.S_ISDIR(path_stat.st_mode) has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0 apply_X_permission = is_directory or has_x_permissions @@ -1480,7 +1480,7 @@ class AnsibleModule(object): raise return cwd except: - # we don't have access to the cwd, probably because of sudo. + # we don't have access to the cwd, probably because of sudo. 
# Try and move to a neutral location to prevent errors for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]: try: @@ -1489,9 +1489,9 @@ class AnsibleModule(object): return cwd except: pass - # we won't error here, as it may *not* be a problem, + # we won't error here, as it may *not* be a problem, # and we don't want to break modules unnecessarily - return None + return None def get_bin_path(self, arg, required=False, opt_dirs=[]): ''' @@ -1584,6 +1584,19 @@ class AnsibleModule(object): print(self.jsonify(kwargs)) sys.exit(1) + def fail_on_missing_params(self, required_params=None): + ''' This is for checking for required params when we can not check via argspec because we + need more information than is simply given in the argspec. + ''' + if not required_params: + return + missing_params = [] + for required_param in required_params: + if not self.params.get(required_param): + missing_params.append(required_param) + if missing_params: + self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + def digest_from_file(self, filename, algorithm): ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. ''' if not os.path.exists(filename): From e46074c79132e0181b5284642f8ad03faf6209ff Mon Sep 17 00:00:00 2001 From: Andre keedy <andre.keedy@emc.com> Date: Thu, 4 Feb 2016 16:58:10 -0500 Subject: [PATCH 0560/1113] Address comments --- contrib/inventory/rackhd.py | 57 ++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/contrib/inventory/rackhd.py b/contrib/inventory/rackhd.py index df74eb332b0..e7db24d3204 100755 --- a/contrib/inventory/rackhd.py +++ b/contrib/inventory/rackhd.py @@ -1,4 +1,5 @@ #!/usr/bin/python + import json import requests import os @@ -12,13 +13,8 @@ class RackhdInventory(object): self._inventory = {} for nodeid in nodeids: self._load_inventory_data(nodeid) - output = '{\n' for nodeid,info in self._inventory.iteritems(): - output += self._format_output(nodeid, info) - output += ',\n' - output = output[:-2] - output += '}\n' - print (output) + print(json.dumps(self._format_output(nodeid, info))) def _load_inventory_data(self, nodeid): info = {} @@ -29,28 +25,29 @@ class RackhdInventory(object): for key,url in info.iteritems(): r = requests.get( url, verify=False) results[key] = r.text - self._inventory[nodeid] = results def _format_output(self, nodeid, info): - output = '' try: node_info = json.loads(info['lookup']) ipaddress = '' if len(node_info) > 0: - ipaddress = node_info[0]["ipAddress"] - output += ' "' + nodeid + '" : {\n' - output += ' "hosts": [ "' + ipaddress + '" ],\n' - output += ' "vars" : {\n' + ipaddress = node_info[0]['ipAddress'] + output = {nodeid:{ 'hosts':[ipaddress],'vars':{}}} for key,result in info.iteritems(): - output += ' "' + key + '": ' + json.dumps(json.loads(result), sort_keys=True, indent=2) + ',\n' - output += ' "ansible_ssh_user": "renasar"\n' - output += ' }\n' - output += ' }\n' + output[nodeid]['vars'][key] = json.loads(result) + output[nodeid]['vars']['ansible_ssh_user'] = 'monorail' except KeyError: pass return output + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--host') + parser.add_argument('--list', action='store_true') + return parser.parse_args() + try: #check if rackhd url(ie:10.1.1.45:8080) is specified in the environment RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) @@ -61,13 +58,21 @@ except: # Use the nodeid specified in the environment to limit the data 
returned # or return data for all available nodes nodeids = [] -try: - nodeids += os.environ['nodeid'].split(',') -except KeyError: - url = RACKHD_URL + '/api/common/nodes' - r = requests.get( url, verify=False) - data = json.loads(r.text) - for entry in data: - if entry['type'] == 'compute': - nodeids.append(entry['id']) -RackhdInventory(nodeids) + +if (parse_args().host): + try: + nodeids += parse_args().host.split(',') + RackhdInventory(nodeids) + except: + pass +if (parse_args().list): + try: + url = RACKHD_URL + '/api/common/nodes' + r = requests.get( url, verify=False) + data = json.loads(r.text) + for entry in data: + if entry['type'] == 'compute': + nodeids.append(entry['id']) + RackhdInventory(nodeids) + except: + pass From 75b6f616190857db6b816096ce39bdaf2c16822a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 4 Feb 2016 17:15:54 -0500 Subject: [PATCH 0561/1113] Fix the way we re-add variables from PlayContext to the variable dict * If the internal value is None, do not add the variable * Make sure all aliases for a given variable name are set (if they're not already set in the dictionary) Fixes #14310 --- lib/ansible/playbook/play_context.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 1804a032c62..f7b38e9e79d 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -524,11 +524,9 @@ class PlayContext(Base): In case users need to access from the play, this is a legacy from runner. ''' - # TODO: should we be setting the more generic values here rather than - # the more specific _ssh_ ones? - for special_var in RESET_VARS: - - if special_var not in variables: - for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): - if special_var in varnames: - variables[special_var] = getattr(self, prop) + for prop, var_list in MAGIC_VARIABLE_MAPPING.items(): + var_val = getattr(self, prop, None) + if var_val is not None: + for var_opt in var_list: + if var_opt not in variables: + variables[var_opt] = var_val From 5679b5414cbb2a46ca2f38acb5dec2c011cf645a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 18:42:27 -0500 Subject: [PATCH 0562/1113] avoid errors from possible None/False args --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 70439a6c06c..cfa0e9b02b4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1825,7 +1825,7 @@ class AnsibleModule(object): # expand things like $HOME and ~ if not shell: - args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ] + args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x ] rc = 0 msg = None From 7037f793b9a90f07e51c15b2bca6be862f159ff4 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 18:51:24 -0500 Subject: [PATCH 0563/1113] Expain our commitment to API --- docsite/rst/developing_api.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index c6369f77b99..d911d4b4e84 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -3,6 +3,13 @@ Python API .. 
contents:: Topics +Please note that while we make this API available it is not intended for direct consumption, it is here +for the support of the Ansible command line tools. We try not to make breaking changes but we reserve the +right to do so at any time if it makes sense for the Ansible toolset. + + +The following documentation is provided for those that still want to use the API directly, but be mindful this is not something the Ansible team supports. + There are several interesting ways to use Ansible from an API perspective. You can use the Ansible python API to control nodes, you can extend Ansible to respond to various python events, you can write various plugins, and you can plug in inventory data from external data sources. This document From d6283129e13e1be5fa41b1440a0ab8016cd1b50c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 19:38:47 -0500 Subject: [PATCH 0564/1113] corrections and additions to porting guide fixes in syntax to what is now allowable added section from user feedback --- docsite/rst/porting_guide_2.0.rst | 59 +++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 489b74a287e..33ad202ff1d 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -71,7 +71,7 @@ uses key=value escaping which has not changed. The other option is to check for tasks: - file: args: "{{item}}" # <- args here uses the full variable syntax - with_items: my_dirs + with_items: "{{my_dirs}}" * porting task includes * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. @@ -126,12 +126,12 @@ While all items listed here will show a deprecation warning message, they still Should now be:: - include: foo.yml - args: + vars: a: 1 * Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. * Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. -* Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. +* Tags (or any directive) should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. For example:: - include: foo.yml tags=a,b,c @@ -144,6 +144,58 @@ Should now be:: * The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. +Other caveats +------------- + +Here are some corner cases encountered when updating, these are mostly caused by the more stringent parser validation and the capture of errors that were previouslly ignored. + +* Bad variable composition:: + + with_items: myvar_{{rest_of_name}} + + This worked 'by accident' as the errors were retemplated and ended up resolving the variable, it was never intended as valid syntax and now properly returns an error, use the following instead.:: + + with_items: "{{vars['myvar_' + res_of_name]}}" + + Or `hostvasrs[inventory_hostname]['myvar_' + rest_of_name]` if appropriate. + +* Mispelled directives:: + + - task: dostuf + became: yes + The task always ran without using privilege escalation (for that you need `become`) but was also silently ignored so the play 'ran' even though it should not, now this is a parsing error. 
+ + +* Duplicate directives:: + + - task: dostuf + when: True + when: False + + The first `when` was ignored and only the 2nd one was used as the play ran w/o warning it was ignoring one of the directives, now this produces a parsing error. + +* Conflating variables and directives:: + + - role: {name=rosy, port=435 } + + # in tasks/main.yml + - wait_for: port={{port}} + + The `port` variable is reserved as a play/task directive for overriding the connection port, in previous versions this got conflated with a variable named `port` and was usable + later in the play, this created issues if a host tried to reconnect or was using a non caching connection. Now it will be correctly identified as a directive and the `port` variable + will appear as undefined, this now forces the use of non conflicting names and removes ambiguity when adding settings and varaibles to a role invocation.. + +* Bare operations on `with_`:: + + with_items: var1 + var2 + + An issue with the 'bare variable' features, which was supposed only tempate a single variable without the need of braces ({{ )}}, would in some versions of Ansible template full expressions. + Now you need to use proper templating and braces for all expressions everywhere except condtionals (`when`):: + + with_items: "{{var1 + var2}}" + + The bare feature itself is deprecated as an undefined variable is indistiguishable from a string which makes it dificult to display a proper error. + Porting plugins =============== @@ -207,3 +259,4 @@ Porting custom scripts Custom scripts that used the ``ansible.runner.Runner`` API in 1.x have to be ported in 2.x. Please refer to: https://github.com/ansible/ansible/blob/devel/docsite/rst/developing_api.rst + From 3d894b0f13af56860a184e92e258893ef70ccc8d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 20:10:37 -0500 Subject: [PATCH 0565/1113] corrected mizpellz --- docsite/rst/porting_guide_2.0.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 33ad202ff1d..b2b6b15dd1c 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -157,12 +157,12 @@ Here are some corner cases encountered when updating, these are mostly caused by with_items: "{{vars['myvar_' + res_of_name]}}" - Or `hostvasrs[inventory_hostname]['myvar_' + rest_of_name]` if appropriate. + Or `hostvars[inventory_hostname]['myvar_' + rest_of_name]` if appropriate. -* Mispelled directives:: +* Misspelled directives:: - task: dostuf - became: yes + becom: yes The task always ran without using privilege escalation (for that you need `become`) but was also silently ignored so the play 'ran' even though it should not, now this is a parsing error. @@ -194,7 +194,7 @@ Here are some corner cases encountered when updating, these are mostly caused by with_items: "{{var1 + var2}}" - The bare feature itself is deprecated as an undefined variable is indistiguishable from a string which makes it dificult to display a proper error. + The bare feature itself is deprecated as an undefined variable is indistiguishable from a string which makes it difficult to display a proper error. 
Porting plugins =============== From 597638c4014bbee80f3f89ee17d0b995ad4b93d9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 20:35:12 -0500 Subject: [PATCH 0566/1113] revamped become page its not only for the 'upgrade from 1.8' anymore, become is the official way to do things and now presents as such, no need to know about previous options, but still keep info there for those that were using them.. --- docsite/rst/become.rst | 52 ++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 7597643f883..3a6d6960900 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -7,49 +7,53 @@ Ansible can use existing privilege escalation systems to allow a user to execute Become `````` -Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user -and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still -being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), -`pfexec` and others. +Ansible allows you 'become' another user, different from the user that logged into the machine (remote user). This is done existing +privilege escalation tools, which you probably already use or have configured, like 'sudo', 'su', 'pfexec', 'doas', 'pbrun' and others. -New directives --------------- +.. note:: Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user + and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still being backwards compatible. + This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), `pfexec` and others. + + +Directives +----------- +These can be set from play to task level, but are overriden by connection variables as they can be host specific. become - equivalent to adding `sudo:` or `su:` to a play or task, set to 'true'/'yes' to activate privilege escalation + set to 'true'/'yes' to activate privilege escalation. become_user - equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges + set to user with desired privileges, the user you 'become', NOT the user you login as. become_method at play or task level overrides the default method set in ansible.cfg, set to 'sudo'/'su'/'pbrun'/'pfexec'/'doas' -New ansible\_ variables ------------------------ -Each allows you to set an option per group and/or host +Connection variables +-------------------- +Each allows you to set an option per group and/or host, these are normally defined in inventory but can be used as normal variables. ansible_become - equivalent to ansible_sudo or ansible_su, allows to force privilege escalation + equivalent of the become directive, decides if privilege escalation is used or not. 
ansible_become_method allows to set privilege escalation method ansible_become_user - equivalent to ansible_sudo_user or ansible_su_user, allows to set the user you become through privilege escalation + allows to set the user you become through privilege escalation ansible_become_pass - equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password + allows you to set the privilege escalation password New command line options ------------------------ ---ask-become-pass - ask for privilege escalation password +--ask-become-pass, -K + ask for privilege escalation password, does not imply become will be used ---become,-b +--become, -b run operations with become (no password implied) --become-method=BECOME_METHOD @@ -60,16 +64,14 @@ New command line options run operations as this user (default=root) -sudo and su still work! ------------------------ +For those from Pre 1.9 , sudo and su still work! +------------------------------------------------ -Old playbooks will not need to be changed, even though they are deprecated, sudo and su directives will continue to work though it -is recommended to move to become as they may be retired at one point. You cannot mix directives on the same object though, Ansible -will complain if you try to. - -Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the -new ones. +For those using old playbooks will not need to be changed, even though they are deprecated, sudo and su directives, variables and options +will continue to work. It is recommended to move to become as they may be retired at one point. +ou cannot mix directives on the same object (become and sudo) though, Ansible will complain if you try to. +Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the new ones. .. note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc). From 9e27f627854550f56b14944bf2468b43db23f097 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 4 Feb 2016 23:46:44 -0500 Subject: [PATCH 0567/1113] added yumrepo to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ea4893728a..65cca29a23b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ Ansible Changes By Release * aws: ec2_vpc_dhcp_options.py * aws: ec2_vpc_net_facts * cloudstack: cs_volume +* yumrepo ####New Filters: * extract From 2f411557f59572e71b9ff89b401b052aef20c685 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag.wieers@gmail.com> Date: Fri, 5 Feb 2016 13:54:58 +0100 Subject: [PATCH 0568/1113] Fix small typo While reading one of the docs I found this small typo. --- docsite/rst/playbooks_loops.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 9bccfa6b64a..f6ab87beef7 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -550,7 +550,7 @@ Loops and Includes `````````````````` In 2.0 you are able to use `with_` loops and task includes (but not playbook includes), this adds the ability to loop over the set of tasks in one shot. 
-There are a couple of things that you need to keep in mind, a included task that has it's own `with_` loop will overwrite the value of the special `item` variable. +There are a couple of things that you need to keep in mind, a included task that has its own `with_` loop will overwrite the value of the special `item` variable. So if you want access to both the include's `item` and the current task's `item` you should use `set_fact` to create a alias to the outer one.:: From b713f1ca538b3a3cc9e6713594d570d19dd5db22 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 5 Feb 2016 10:35:50 -0500 Subject: [PATCH 0569/1113] Fix for copy test as ansible_ssh_user is not set by default now --- test/integration/roles/test_copy/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 8bb13b45022..d509093730e 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -166,7 +166,7 @@ # register: failed_copy - name: copy already copied directory again - copy: src=subdir dest={{output_subdir | expanduser}} owner={{ansible_ssh_user}} + copy: src=subdir dest={{output_subdir | expanduser}} owner={{ansible_ssh_user|default(omit)}} register: copy_result5 - name: assert that the directory was not changed From 27f9cc05f883f8d8ae9b2e6f87d4085c02861cef Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 5 Feb 2016 10:58:24 -0500 Subject: [PATCH 0570/1113] Make sure run_once tasks also set any_errors_fatal so all hosts fail Currently implemented only in the linear strategy (free strategy simply ignores run_once/BYPASS_HOST_LOOP actions). Fixes #14252 --- lib/ansible/plugins/strategy/linear.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 00c8e157491..c5616c917c5 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -190,9 +190,6 @@ class StrategyModule(StrategyBase): run_once = False work_to_do = True - if task.any_errors_fatal: - any_errors_fatal = True - # test to see if the task across all hosts points to an action plugin which # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we # will only send this task to the first host in the list. 
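The comment above describes the host-loop bypass that this patch now ties into ``any_errors_fatal``; a minimal sketch of an action plugin that opts into that bypass (the plugin itself is hypothetical, the ``BYPASS_HOST_LOOP`` attribute is the only thing the strategy checks via ``getattr``)::

    from ansible.plugins.action import ActionBase

    class ActionModule(ActionBase):

        # The strategy queues a task whose action sets this flag for the first
        # host in the batch only, the same way an explicit run_once does.
        BYPASS_HOST_LOOP = True

        def run(self, tmp=None, task_vars=None):
            return {'changed': False, 'msg': 'executed once for the whole host loop'}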
@@ -230,7 +227,10 @@ class StrategyModule(StrategyBase): templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") - run_once = templar.template(task.run_once) + run_once = templar.template(task.run_once) or getattr(action, 'BYPASS_HOST_LOOP', False) + + if task.any_errors_fatal or run_once: + any_errors_fatal = True if not callback_sent: display.debug("sending task start callback, copying the task so we can template it temporarily") @@ -254,7 +254,7 @@ class StrategyModule(StrategyBase): self._queue_task(host, task, task_vars, play_context) # if we're bypassing the host loop, break out now - if run_once or getattr(action, 'BYPASS_HOST_LOOP', False): + if run_once: break results += self._process_pending_results(iterator, one_pass=True) From c79d9a9c623cc650f92866052e6eeb6537f1cb58 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 5 Feb 2016 11:19:50 -0500 Subject: [PATCH 0571/1113] Handle run_once scenarios in the free strategy * Raise an error if the action is using BYPASS_HOST_LOOP, to prevent unexpected behavior from those actions * Show a warning regarding tasks marked as run_once, as the free strategy does not yet support that behavior * Minor tweak to linear strategies run_once code to make sure we don't raise an error if an action isn't found --- lib/ansible/plugins/strategy/free.py | 20 ++++++++++++++++++++ lib/ansible/plugins/strategy/linear.py | 4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 17516c91bc9..5431f96f46d 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -23,7 +23,9 @@ import time from ansible.errors import AnsibleError from ansible.playbook.included_file import IncludedFile +from ansible.plugins import action_loader from ansible.plugins.strategy import StrategyBase +from ansible.template import Templar try: from __main__ import display @@ -92,10 +94,28 @@ class StrategyModule(StrategyBase): self._blocked_hosts[host_name] = True (state, task) = iterator.get_next_task_for_host(host) + try: + action = action_loader.get(task.action, class_only=True) + except KeyError: + # we don't care here, because the action may simply not have a + # corresponding action plugin + action = None + display.debug("getting variables") task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + self.add_tqm_variables(task_vars, play=iterator._play) + templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") + run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False) + if run_once: + if action and getattr(action, 'BYPASS_HOST_LOOP', False): + raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy " \ + "and would instead execute for every host in the inventory list." % task.action, obj=task._ds) + else: + display.warning("Using run_once with the free strategy is not currently supported. 
This task will still be " \ + "executed for every host in the inventory list.") + # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(host): diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index c5616c917c5..846c3d0cf3e 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -199,7 +199,7 @@ class StrategyModule(StrategyBase): except KeyError: # we don't care here, because the action may simply not have a # corresponding action plugin - pass + action = None # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) @@ -227,7 +227,7 @@ class StrategyModule(StrategyBase): templar = Templar(loader=self._loader, variables=task_vars) display.debug("done getting variables") - run_once = templar.template(task.run_once) or getattr(action, 'BYPASS_HOST_LOOP', False) + run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False) if task.any_errors_fatal or run_once: any_errors_fatal = True From 0a20c53ed6e9c451a5775525c4d45912239aead1 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Fri, 5 Feb 2016 18:38:56 +0100 Subject: [PATCH 0572/1113] A few more typos I came across another typo, and fixed similar ones in the docs. --- docsite/rst/developing_test_pr.rst | 2 +- docsite/rst/intro_windows.rst | 2 +- docsite/rst/playbooks_loops.rst | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst index cf3d0d7536f..1d5a67c5213 100644 --- a/docsite/rst/developing_test_pr.rst +++ b/docsite/rst/developing_test_pr.rst @@ -152,7 +152,7 @@ Once the files are in place, you can run the provided playbook (if there is one) ansible-playbook -vvv playbook_name.yml -If there's not a playbook, you may have to copy and paste playbook snippets or run a ad-hoc command that was pasted in. +If there's no playbook, you may have to copy and paste playbook snippets or run an ad-hoc command that was pasted in. Our issue template also included sections for "Expected Output" and "Actual Output", which should be used to gauge the output from the provided examples. diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index b9b195643be..08cf7cba0af 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -256,7 +256,7 @@ Developers: Supported modules and how it works Developing Ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix. What if you want to write Windows modules for Ansible though? -For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules in the core and extras repo live in a "windows/" subdir. Custom modules can go directly into the Ansible "library/" directories or those added in ansible.cfg. Documentation lives in a a `.py` file with the same name. For example, if a module is named "win_ping", there will be embedded documentation in the "win_ping.py" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. 
+For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules in the core and extras repo live in a "windows/" subdir. Custom modules can go directly into the Ansible "library/" directories or those added in ansible.cfg. Documentation lives in a `.py` file with the same name. For example, if a module is named "win_ping", there will be embedded documentation in the "win_ping.py" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. Modules (ps1 files) should start as follows:: diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index f6ab87beef7..5d88da5af8d 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -550,8 +550,8 @@ Loops and Includes `````````````````` In 2.0 you are able to use `with_` loops and task includes (but not playbook includes), this adds the ability to loop over the set of tasks in one shot. -There are a couple of things that you need to keep in mind, a included task that has its own `with_` loop will overwrite the value of the special `item` variable. -So if you want access to both the include's `item` and the current task's `item` you should use `set_fact` to create a alias to the outer one.:: +There are a couple of things that you need to keep in mind, an included task that has its own `with_` loop will overwrite the value of the special `item` variable. +So if you want access to both the include's `item` and the current task's `item` you should use `set_fact` to create an alias to the outer one.:: - include: test.yml From 9eca81537712222a49cc326efba207ce94d93d94 Mon Sep 17 00:00:00 2001 From: Tom Paine <aioue@users.noreply.github.com> Date: Fri, 5 Feb 2016 18:06:10 +0000 Subject: [PATCH 0573/1113] Update profile_tasks.md Converted to RST. --- lib/ansible/plugins/callback/profile_tasks.md | 83 +++++++++++-------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.md b/lib/ansible/plugins/callback/profile_tasks.md index b52081d83bf..86d19e9bd4e 100644 --- a/lib/ansible/plugins/callback/profile_tasks.md +++ b/lib/ansible/plugins/callback/profile_tasks.md @@ -1,58 +1,69 @@ -# profile_tasks.py +profile\_tasks.py +================= + Ansible plugin for timing individual tasks and overall execution time. Mashup of 2 excellent original works: -- (https://github.com/jlafon/ansible-profile) -- (https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old) -## Usage +- (https://github.com/jlafon/ansible-profile) +- (https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old) -Add `profile_taks` to the `callback_whitelist` in `ansible.cfg`. +Usage +----- + +Add ``profile_taks`` to the ``callback_whitelist`` in ``ansible.cfg``. Run playbooks as normal. -## Features +Features +-------- -### Tasks +Tasks +~~~~~ Ongoing timing of each task as it happens. 
-Format: -`<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>` +| Format: +| ``<task start timestamp> (<length of previous task>) <current elapsed playbook execution time>`` -```shell -TASK: [ensure messaging security group exists] ******************************** -Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 ********* -ok: [localhost] +Task output example: -TASK: [ensure db security group exists] *************************************** -Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 ********* -changed: [localhost] -``` +.. code:: shell -### Play Recap + TASK: [ensure messaging security group exists] ******************************** + Thursday 11 June 2017 22:50:53 +0100 (0:00:00.721) 0:00:05.322 ********* + ok: [localhost] -Recap includes ending timestamp, total playbook execution time and a sorted list of the top longest running tasks. + TASK: [ensure db security group exists] *************************************** + Thursday 11 June 2017 22:50:54 +0100 (0:00:00.558) 0:00:05.880 ********* + changed: [localhost] + +Play Recap +~~~~~~~~~~ + +Recap includes ending timestamp, total playbook execution time and a +sorted list of the top longest running tasks. No more wondering how old the results in a terminal window are. -```shell - ansible <args here> - <normal output here> - PLAY RECAP ******************************************************************** - Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 ********* - =============================================================================== - really slow task | Download project packages----------------------------11.61s - security | Really slow security policies----------------------------------7.03s - common-base | Install core system dependencies----------------------------3.62s - common | Install pip------------------------------------------------------3.60s - common | Install boto-----------------------------------------------------3.57s - nginx | Install nginx-----------------------------------------------------3.41s - serf | Install system dependencies----------------------------------------3.38s - duo_security | Install Duo Unix SSH Integration---------------------------3.37s - loggly | Install TLS version----------------------------------------------3.36s -``` +.. 
code:: shell -## Compatibility + ansible <args here> + <normal output here> + PLAY RECAP ******************************************************************** + Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 ********* + =============================================================================== + really slow task | Download project packages----------------------------11.61s + security | Really slow security policies----------------------------------7.03s + common-base | Install core system dependencies----------------------------3.62s + common | Install pip------------------------------------------------------3.60s + common | Install boto-----------------------------------------------------3.57s + nginx | Install nginx-----------------------------------------------------3.41s + serf | Install system dependencies----------------------------------------3.38s + duo_security | Install Duo Unix SSH Integration---------------------------3.37s + loggly | Install TLS version----------------------------------------------3.36s + +Compatibility +------------- Ansible 2.0+ From 0e57c577f42b5b28ae702d52d50dc19011071eb0 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Fri, 5 Feb 2016 12:12:04 -0600 Subject: [PATCH 0574/1113] Add RedirectHandler class and factory function for controlling redirects in urllib2 --- lib/ansible/module_utils/urls.py | 70 ++++++++++++++++++++++++++++---- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 41613f6cb61..a930483463b 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -417,7 +417,7 @@ class RequestWithMethod(urllib2.Request): def __init__(self, url, method, data=None, headers=None): if headers is None: headers = {} - self._method = method + self._method = method.upper() urllib2.Request.__init__(self, url, data, headers) def get_method(self): @@ -427,6 +427,55 @@ class RequestWithMethod(urllib2.Request): return urllib2.Request.get_method(self) +def RedirectHandlerFactory(follow_redirects=None): + """This is a class factory that closes over the value of + ``follow_redirects`` so that the RedirectHandler class has access to + that value without having to use globals, and potentially cause problems + where ``open_url`` or ``fetch_url`` are used multiple times in a module. + """ + + class RedirectHandler(urllib2.HTTPRedirectHandler): + """This is an implementation of a RedirectHandler to match the + functionality provided by httplib2. It will utilize the value of + ``follow_redirects`` that is passed into ``RedirectHandlerFactory`` + to determine how redirects should be handled in urllib2. 
+ """ + + def redirect_request(self, req, fp, code, msg, hdrs, newurl): + if follow_redirects == 'urllib2': + return urllib2.HTTPRedirectHandler.redirect_request(self, req, + fp, code, + msg, hdrs, + newurl) + + if follow_redirects in [None, 'no', 'none']: + raise urllib2.HTTPError(newurl, code, msg, hdrs, fp) + + do_redirect = False + if follow_redirects in ['all', 'yes']: + do_redirect = (code >= 300 and code < 400) + + elif follow_redirects == 'safe': + m = req.get_method() + do_redirect = (code >= 300 and code < 400 and m in ('GET', 'HEAD')) + + if do_redirect: + # be conciliant with URIs containing a space + newurl = newurl.replace(' ', '%20') + newheaders = dict((k,v) for k,v in req.headers.items() + if k.lower() not in ("content-length", "content-type") + ) + return urllib2.Request(newurl, + headers=newheaders, + origin_req_host=req.get_origin_req_host(), + unverifiable=True) + else: + raise urllib2.HTTPError(req.get_full_url(), code, msg, hdrs, + fp) + + return RedirectHandler + + class SSLValidationHandler(urllib2.BaseHandler): ''' A custom handler class for SSL validation. @@ -604,7 +653,8 @@ class SSLValidationHandler(urllib2.BaseHandler): # Rewrite of fetch_url to not require the module environment def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, - url_username=None, url_password=None, http_agent=None, force_basic_auth=False): + url_username=None, url_password=None, http_agent=None, + force_basic_auth=False, follow_redirects='urllib2'): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' @@ -681,6 +731,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, if hasattr(socket, 'create_connection') and CustomHTTPSHandler: handlers.append(CustomHTTPSHandler) + if follow_redirects != 'urllib2': + handlers.append(RedirectHandlerFactory(follow_redirects)) + opener = urllib2.build_opener(*handlers) urllib2.install_opener(opener) @@ -750,7 +803,8 @@ def url_argument_spec(): ) def fetch_url(module, url, data=None, headers=None, method=None, - use_proxy=True, force=False, last_mod_time=None, timeout=10): + use_proxy=True, force=False, last_mod_time=None, timeout=10, + follow_redirects=False): ''' Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment ''' @@ -767,14 +821,16 @@ def fetch_url(module, url, data=None, headers=None, method=None, password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', None) force_basic_auth = module.params.get('force_basic_auth', '') + follow_redirects = follow_redirects or module.params.get('follow_redirects', 'urllib2') r = None info = dict(url=url) try: r = open_url(url, data=data, headers=headers, method=method, - use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, - validate_certs=validate_certs, url_username=username, - url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth) + use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, + validate_certs=validate_certs, url_username=username, + url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth, + follow_redirects=follow_redirects) info.update(r.info()) info['url'] = r.geturl() # The URL goes in too, because of redirects. 
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) @@ -787,7 +843,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, except (ConnectionError, ValueError), e: module.fail_json(msg=str(e)) except urllib2.HTTPError, e: - info.update(dict(msg=str(e), status=e.code)) + info.update(dict(msg=str(e), status=e.code, **e.info())) except urllib2.URLError, e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % str(e), status=code)) From 97e0f29a5b7109387b4ba3d954231ce25e19d722 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Fri, 5 Feb 2016 12:14:57 -0600 Subject: [PATCH 0575/1113] Add/improve tests for the uri module --- .../integration/roles/test_uri/tasks/main.yml | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 9ce05938b62..4d8f9c7db09 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -113,7 +113,7 @@ assert: that: - "result.failed == true" - - "'certificate does not match ' in result.msg" + - "'SSL Certificate does not belong' in result.msg" - "stat_result.stat.exists == false" - name: Clean up any cruft from the results directory @@ -140,3 +140,45 @@ that: - "stat_result.stat.exists == true" - "result.changed == true" + +- name: test redirect without follow_redirects + uri: + url: 'http://httpbin.org/redirect/2' + follow_redirects: 'none' + status_code: 302 + register: result + +- name: Assert location header + assert: + that: + - 'result.location|default("") == "http://httpbin.org/relative-redirect/1"' + +- name: test basic auth + uri: + url: 'http://httpbin.org/basic-auth/user/passwd' + user: user + password: passwd + +- name: test basic forced auth + uri: + url: 'http://httpbin.org/hidden-basic-auth/user/passwd' + force_basic_auth: true + user: user + password: passwd + +- name: test PUT + uri: + url: 'http://httpbin.org/put' + method: PUT + body: 'foo=bar' + +- name: test OPTIONS + uri: + url: 'http://httpbin.org/' + method: OPTIONS + register: result + +- name: Assert we got an allow header + assert: + that: + - 'result.allow|default("") == "HEAD, OPTIONS, GET"' From ad37a9151415388b5d9072df3d638c6a222fcc87 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 5 Feb 2016 14:32:32 -0500 Subject: [PATCH 0576/1113] really only want to prevent None typep here false is sometimes needed --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index cfa0e9b02b4..ea0e429b6b4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1825,7 +1825,7 @@ class AnsibleModule(object): # expand things like $HOME and ~ if not shell: - args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x ] + args = [ os.path.expandvars(os.path.expanduser(x)) for x in args if x is not None ] rc = 0 msg = None From 50dfd4b057478051062dc3999f616265d1d9ad54 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 5 Feb 2016 15:48:56 -0500 Subject: [PATCH 0577/1113] fixed bad tag example --- docsite/rst/playbooks_tags.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_tags.rst b/docsite/rst/playbooks_tags.rst index 7876fdc3d89..148e9dcfb80 100644 --- 
a/docsite/rst/playbooks_tags.rst +++ b/docsite/rst/playbooks_tags.rst @@ -36,7 +36,8 @@ You may also apply tags to roles:: And you may also tag basic include statements:: - - include: foo.yml tags=web,foo + - include: foo.yml + tags: [web,foo] Both of these apply the specified tags to every task inside the included file or role, so that these tasks can be selectively run when the playbook From 8eea1c7e01e2c17d84ea70d2a485e20de4b368a0 Mon Sep 17 00:00:00 2001 From: Felix Fontein <felix@fontein.de> Date: Sat, 6 Feb 2016 21:19:03 +0100 Subject: [PATCH 0578/1113] Allowing multiple tags to be specified in include's tags argument. --- lib/ansible/plugins/strategy/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index cb34e4035a4..34db52a77dc 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -489,7 +489,7 @@ class StrategyBase: # error so that users know not to specify them both ways tags = temp_vars.pop('tags', []) if isinstance(tags, string_types): - tags = [ tags ] + tags = tags.split(',') if len(tags) > 0: if len(b._task_include.tags) > 0: raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement", From d1c2d16706afd5a3377c606131d768fa4b4074a9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 7 Feb 2016 12:45:03 -0800 Subject: [PATCH 0579/1113] Allow setting run_command environment overrides for the life of an AnsibleModule --- lib/ansible/module_utils/basic.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index ea0e429b6b4..6ca893b51c0 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -516,6 +516,9 @@ class AnsibleModule(object): self._debug = False self._diff = False self._verbosity = 0 + # May be used to set modifications to the environment for any + # run_command invocation + self.run_command_environ_update = {} self.aliases = {} self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity'] @@ -1833,6 +1836,10 @@ class AnsibleModule(object): # Manipulate the environ we'll send to the new process old_env_vals = {} + # We can set this from both an attribute and per call + for key, val in self.run_command_environ_update.items(): + old_env_vals[key] = os.environ.get(key, None) + os.environ[key] = val if environ_update: for key, val in environ_update.items(): old_env_vals[key] = os.environ.get(key, None) @@ -1907,7 +1914,6 @@ class AnsibleModule(object): else: running = args self.log('Executing: ' + running) - cmd = subprocess.Popen(args, **kwargs) # the communication logic here is essentially taken from that From ccbc7d217bc6d19678bb76f69bb21dc4a9db7ac8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 7 Feb 2016 14:30:28 -0800 Subject: [PATCH 0580/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e1ec52e365a..009389bbb2e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 
e1ec52e365a8fbe95c83db5da3046730c4dc39b2 +Subproject commit 009389bbb2e472c938c63f16b5742dfb9862718b diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 14a62fb5d67..ff8806eaee8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 14a62fb5d6771871654aedb4a36e17cf358785dc +Subproject commit ff8806eaee8780db61598b7047d0f15787223175 From 5a1887cc762c0001b6c80e97e36bd9aac05b3ae2 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 6 Feb 2016 00:53:01 -0500 Subject: [PATCH 0581/1113] correctly handle term signals - adhoc now terminates gracefully - avoid race condition on terminations by ignoring errors if worker might have been reaped between checking if active and termination call - ansible-playbook now properly exits on sigint/term - adhoc and playbook now give exceptions that we should not normally capture and rely on top level finally to reap children - handle systemexit breaks in workers - added debug to see at which frame we exit partial fix for #14346 --- lib/ansible/cli/adhoc.py | 9 +++++++++ lib/ansible/executor/playbook_executor.py | 9 +++++---- lib/ansible/executor/process/result.py | 2 +- lib/ansible/executor/process/worker.py | 4 ++-- lib/ansible/executor/task_queue_manager.py | 5 ++++- 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 97df8fcdbf0..faacd6c67bb 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -21,6 +21,7 @@ __metaclass__ = type ######################################################## import os +import signal from ansible import constants as C from ansible.cli import CLI @@ -88,6 +89,10 @@ class AdHocCLI(CLI): tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args)), async=async, poll=poll) ] ) + def _terminate(self, signum=None, framenum=None): + if signum is not None: + raise SystemExit("Interrupt detected, shutting down gracefully") + def run(self): ''' use Runner lib to do SSH things ''' @@ -170,6 +175,9 @@ class AdHocCLI(CLI): # now create a task queue manager to execute the play self._tqm = None try: + # Manage user interruptions + signal.signal(signal.SIGTERM, self._terminate) + self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, @@ -180,6 +188,7 @@ class AdHocCLI(CLI): run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, run_tree=run_tree, ) + result = self._tqm.run(play) finally: if self._tqm: diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 30d9ad6d6b8..c1a1303a9c7 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -69,7 +69,7 @@ class PlaybookExecutor: may limit the runs to serialized groups, etc. 
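
The signal handling introduced in this patch follows one idea: convert SIGTERM into a ``SystemExit`` so that normal ``try``/``finally`` unwinding can reap workers and close queues, instead of the process dying mid-cleanup. A minimal sketch of that idea, with placeholders rather than Ansible APIs (the loop stands in for the long-running play execution and runs until a signal arrives)::

    import signal
    import time

    def _terminate(signum=None, frame=None):
        # SystemExit propagates like a normal exception, so enclosing
        # try/finally blocks still run before the interpreter exits
        raise SystemExit("Terminating run due to external signal")

    signal.signal(signal.SIGTERM, _terminate)

    try:
        while True:              # placeholder for the long-running work
            time.sleep(1)
    finally:
        print("cleaning up workers")   # reached on SIGTERM as well
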
''' - signal.signal(signal.SIGINT, self._cleanup) + signal.signal(signal.SIGTERM, self._terminate) result = 0 entrylist = [] @@ -199,7 +199,7 @@ class PlaybookExecutor: finally: if self._tqm is not None: - self._cleanup() + self._tqm.cleanup() if self._options.syntax: display.display("No issues encountered") @@ -207,8 +207,9 @@ class PlaybookExecutor: return result - def _cleanup(self, signum=None, framenum=None): - return self._tqm.cleanup() + def _terminate(self, signum=None, framenum=None): + display.debug(framenum) + raise SystemExit("Terminating run due to external signal") def _get_serialized_batches(self, play): ''' diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 13c91b3ba77..bb4c0dd0a39 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -163,7 +163,7 @@ class ResultProcess(multiprocessing.Process): except queue.Empty: pass - except (KeyboardInterrupt, IOError, EOFError): + except (KeyboardInterrupt, SystemExit, IOError, EOFError): break except: # TODO: we should probably send a proper callback here instead of diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index 120bd8b1414..24b9b3e5e03 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -132,7 +132,7 @@ class WorkerProcess(multiprocessing.Process): self._rslt_q.put(task_result, block=False) except Exception as e: - if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound): + if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] @@ -140,7 +140,7 @@ class WorkerProcess(multiprocessing.Process): self._rslt_q.put(task_result, block=False) except: debug(u"WORKER EXCEPTION: %s" % to_unicode(e)) - debug(u"WORKER EXCEPTION: %s" % to_unicode(traceback.format_exc())) + debug(u"WORKER TRACEBACK: %s" % to_unicode(traceback.format_exc())) debug("WORKER PROCESS EXITING") diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index feb0ab526f0..bed9879c421 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -253,7 +253,10 @@ class TaskQueueManager: rslt_q.close() main_q.close() if worker_prc and worker_prc.is_alive(): - worker_prc.terminate() + try: + worker_prc.terminate() + except AttributeError: + pass def clear_failed_hosts(self): self._failed_hosts = dict() From 1ea21f1f8ce3b9fa74a8803c4b87af50636445c6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 8 Feb 2016 10:00:26 -0500 Subject: [PATCH 0582/1113] Also chmod the args file path when using become for old-style modules Fixes #14348 --- lib/ansible/plugins/action/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index fe8bfc398bf..b81fd495619 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -435,6 +435,8 @@ class ActionBase(with_metaclass(ABCMeta, object)): if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root': # deal with possible umask issues once sudo'ed to other user self._remote_chmod('a+r', remote_module_path) + if args_file_path is not None: + self._remote_chmod('a+r', args_file_path) cmd = "" in_data = None From 
5a88cd8ad9ef9217f771346615d34de490ecf1ba Mon Sep 17 00:00:00 2001 From: Tom Paine <aioue@users.noreply.github.com> Date: Mon, 8 Feb 2016 15:02:50 +0000 Subject: [PATCH 0583/1113] Rename profile_tasks.md to profile_tasks.rst --- .../plugins/callback/{profile_tasks.md => profile_tasks.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/ansible/plugins/callback/{profile_tasks.md => profile_tasks.rst} (100%) diff --git a/lib/ansible/plugins/callback/profile_tasks.md b/lib/ansible/plugins/callback/profile_tasks.rst similarity index 100% rename from lib/ansible/plugins/callback/profile_tasks.md rename to lib/ansible/plugins/callback/profile_tasks.rst From 6414c967e4399f38ceb344d5d944cfb8df696acb Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 8 Feb 2016 12:33:54 -0500 Subject: [PATCH 0584/1113] now check for description and listify if needed fixes #14371 --- hacking/module_formatter.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 43b76ae71be..011bfd53c4a 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -23,14 +23,9 @@ import os import glob import sys import yaml -import codecs -import json -import ast import re import optparse -import time import datetime -import subprocess import cgi import warnings from jinja2 import Environment, FileSystemLoader @@ -302,6 +297,11 @@ def process_module(module, options, env, template, outputname, module_map, alias # don't show version added information if it's too old to be called out if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']): del doc['options'][k]['version_added'] + if not 'description' in doc['options'][k]: + raise AnsibleError("Missing required description for option %s in %s " % (k, module)) + if not isinstance(doc['options'][k]['description'],list): + doc['options'][k]['description'] = [doc['options'][k]['description']] + all_keys.append(k) all_keys = sorted(all_keys) @@ -451,7 +451,6 @@ def main(): env, template, outputname = jinja2_environment(options.template_dir, options.type) categories = list_modules(options.module_dir) - last_category = None category_names = list(categories.keys()) category_names.sort() From c0ebb74ad0ee2eb210266e3610e0b44474628872 Mon Sep 17 00:00:00 2001 From: Robin Roth <robin.roth@kit.edu> Date: Mon, 8 Feb 2016 18:58:36 +0100 Subject: [PATCH 0585/1113] add ismount function from python Lib/posixpath.py needed for https://github.com/ansible/ansible-modules-core/pull/2737 --- lib/ansible/module_utils/ismount.py | 90 +++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 lib/ansible/module_utils/ismount.py diff --git a/lib/ansible/module_utils/ismount.py b/lib/ansible/module_utils/ismount.py new file mode 100644 index 00000000000..808f256c02c --- /dev/null +++ b/lib/ansible/module_utils/ismount.py @@ -0,0 +1,90 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is based on +# Lib/posixpath.py of cpython +# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# +# 1. This LICENSE AGREEMENT is between the Python Software Foundation +# ("PSF"), and the Individual or Organization ("Licensee") accessing and +# otherwise using this software ("Python") in source or binary form and +# its associated documentation. +# +# 2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +# analyze, test, perform and/or display publicly, prepare derivative works, +# distribute, and otherwise use Python alone or in any derivative version, +# provided, however, that PSF's License Agreement and PSF's notice of copyright, +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +# are retained in Python alone or in any derivative version prepared by Licensee. +# +# 3. In the event Licensee prepares a derivative work that is based on +# or incorporates Python or any part thereof, and wants to make +# the derivative work available to others as provided herein, then +# Licensee hereby agrees to include in any such work a brief summary of +# the changes made to Python. +# +# 4. PSF is making Python available to Licensee on an "AS IS" +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +# INFRINGE ANY THIRD PARTY RIGHTS. +# +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. +# +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. +# +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote +# products or services of Licensee, or any third party. +# +# 8. By copying, installing or otherwise using Python, Licensee +# agrees to be bound by the terms and conditions of this License +# Agreement. + +import os + + +def ismount(path): + """Test whether a path is a mount point + clone of os.path.ismount (from cpython Lib/posixpath.py) + fixed to solve https://github.com/ansible/ansible-modules-core/issues/2186 + and workaround non-fixed http://bugs.python.org/issue2466 + this should be rewritten as soon as python issue 2466 is fixed + probably check for python version and use os.path.ismount if fixed + + to remove replace in this file ismount( -> os.path.ismount( and remove this + function""" + + try: + s1 = os.lstat(path) + except OSError: + # the OSError should be handled with more care + # it could be a "permission denied" but path is still a mount + return False + else: + # A symlink can never be a mount point + if os.stat.S_ISLNK(s1.st_mode): + return False + + parent = os.path.join(path, os.path.pardir) + parent = os.path.realpath(parent) + + try: + s2 = os.lstat(parent) + except OSError: + # one should handle the returned OSError with more care to figure + # out whether this is still a mount + return False + + if s1.st_dev != s2.st_dev: + return True # path/.. on a different device as path + if s1.st_ino == s2.st_ino: + return True # path/.. is the same i-node as path, i.e. 
path=='/' + return False From ba51ed06cf5a541688adf4bc2fc1dc7480207e59 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 8 Feb 2016 13:05:19 -0500 Subject: [PATCH 0586/1113] changed from deprecated implicit bare var --- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index f1bd26b7ead..82ec9ec9bf6 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -67,7 +67,7 @@ region: 'us-east-1' instance_ids: "{{ hostvars[item]['ec2_instance_ids'] }}" when: hostvars[item]['ec2_instance_ids'] is defined and item == inventory_hostname - with_items: groups['dynamic_hosts'] + with_items: "{{groups['dynamic_hosts']}}" - set_fact: ansible_connection: local From f3f3e3c66039cefae2c8cf932c859e6128fa5b0b Mon Sep 17 00:00:00 2001 From: Robin Roth <robin.roth@kit.edu> Date: Mon, 8 Feb 2016 19:10:04 +0100 Subject: [PATCH 0587/1113] fix typo --- lib/ansible/module_utils/ismount.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/ismount.py b/lib/ansible/module_utils/ismount.py index 808f256c02c..325a039e8e1 100644 --- a/lib/ansible/module_utils/ismount.py +++ b/lib/ansible/module_utils/ismount.py @@ -70,7 +70,7 @@ def ismount(path): return False else: # A symlink can never be a mount point - if os.stat.S_ISLNK(s1.st_mode): + if os.path.stat.S_ISLNK(s1.st_mode): return False parent = os.path.join(path, os.path.pardir) From 08b3dbcda3f28815af3adff168f67506395d3c17 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 8 Feb 2016 13:13:15 -0500 Subject: [PATCH 0588/1113] corrected several usages of deprecated bare vars --- test/integration/roles/setup_mysql_db/tasks/main.yml | 6 +++--- test/integration/roles/setup_postgresql_db/tasks/main.yml | 8 ++++---- test/integration/roles/test_copy/tasks/main.yml | 2 +- test/integration/roles/test_file/tasks/main.yml | 2 +- test/integration/roles/test_git/tasks/main.yml | 2 +- test/integration/roles/test_mysql_user/tasks/main.yml | 4 ++-- test/integration/roles/test_uri/tasks/main.yml | 8 ++++---- test/integration/roles/test_win_copy/tasks/main.yml | 2 +- test/integration/roles/test_win_file/tasks/main.yml | 2 +- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml index 612d94f6d11..d30f6967cb4 100644 --- a/test/integration/roles/setup_mysql_db/tasks/main.yml +++ b/test/integration/roles/setup_mysql_db/tasks/main.yml @@ -28,17 +28,17 @@ - name: install mysqldb_test rpm dependencies yum: name={{ item }} state=latest - with_items: mysql_packages + with_items: "{{mysql_packages}}" when: ansible_pkg_mgr == 'yum' - name: install mysqldb_test rpm dependencies dnf: name={{ item }} state=latest - with_items: mysql_packages + with_items: "{{mysql_packages}}" when: ansible_pkg_mgr == 'dnf' - name: install mysqldb_test debian dependencies apt: name={{ item }} state=latest - with_items: mysql_packages + with_items: "{{mysql_packages}}" when: ansible_pkg_mgr == 'apt' - name: start mysql_db service if not running diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index c25318a2adc..48f9211e1b4 100644 --- 
a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -10,12 +10,12 @@ # Make sure we start fresh - name: remove rpm dependencies for postgresql test package: name={{ item }} state=absent - with_items: postgresql_packages + with_items: "{{postgresql_packages}}" when: ansible_os_family == "RedHat" - name: remove dpkg dependencies for postgresql test apt: name={{ item }} state=absent - with_items: postgresql_packages + with_items: "{{postgresql_packages}}" when: ansible_pkg_mgr == 'apt' - name: remove old db (red hat) @@ -36,12 +36,12 @@ - name: install rpm dependencies for postgresql test package: name={{ item }} state=latest - with_items: postgresql_packages + with_items: "{{postgresql_packages}}" when: ansible_os_family == "RedHat" - name: install dpkg dependencies for postgresql test apt: name={{ item }} state=latest - with_items: postgresql_packages + with_items: "{{postgresql_packages}}" when: ansible_pkg_mgr == 'apt' - name: Initialize postgres (systemd) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index d509093730e..edae89f56ad 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -150,7 +150,7 @@ assert: that: - "{{item.stat.mode}} == 0700" - with_items: dir_stats.results + with_items: "{{dir_stats.results}}" # errors on this aren't presently ignored so this test is commented out. But it would be nice to fix. diff --git a/test/integration/roles/test_file/tasks/main.yml b/test/integration/roles/test_file/tasks/main.yml index 518f91bf744..d5ba22645d8 100644 --- a/test/integration/roles/test_file/tasks/main.yml +++ b/test/integration/roles/test_file/tasks/main.yml @@ -248,7 +248,7 @@ that: - 'item.changed == true' - 'item.state == "file"' - with_items: file16_result.results + with_items: "{{file16_result.results}}" - name: try to force the sub-directory to a link file: src={{output_dir}}/testing dest={{output_dir}}/sub1 state=link force=yes diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 49f5f53bfb8..e2e9e8bf3ca 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -88,7 +88,7 @@ - name: remove known_host files file: state=absent path={{ item }} - with_items: known_host_files + with_items: "{{known_host_files}}" - name: checkout ssh://git@github.com repo without accept_hostkey (expected fail) git: repo={{ repo_format2 }} dest={{ checkout_dir }} diff --git a/test/integration/roles/test_mysql_user/tasks/main.yml b/test/integration/roles/test_mysql_user/tasks/main.yml index 68042e74913..b0754390403 100644 --- a/test/integration/roles/test_mysql_user/tasks/main.yml +++ b/test/integration/roles/test_mysql_user/tasks/main.yml @@ -96,7 +96,7 @@ mysql_user: name={{ item[0] }} priv={{ item[1] }}.*:ALL append_privs=yes password={{ user_password_1 }} with_nested: - [ '{{ user_name_1 }}' , '{{ user_name_2 }}'] - - db_names + - "{{db_names}}" - name: show grants access for user1 on multiple database command: mysql "-e SHOW GRANTS FOR '{{ user_name_1 }}'@'localhost';" @@ -104,7 +104,7 @@ - name: assert grant access for user1 on multiple database assert: { that: "'{{ item }}' in result.stdout" } - with_items: db_names + with_items: "{{db_names}}" - name: show grants access for user2 on multiple database command: mysql "-e SHOW GRANTS FOR '{{ user_name_2 }}'@'localhost';" 
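
The ``with_items`` changes in this patch replace deprecated bare variable names with explicit ``"{{ ... }}"`` templates. A rough way to see the difference outside Ansible, using plain Jinja2 rather than Ansible's templar (illustrative only)::

    from jinja2 import Template

    db_names = ['db1', 'db2']

    # with the braces the string is unambiguously a template and renders
    # from the variable ...
    print(Template("{{ db_names }}").render(db_names=db_names))

    # ... while a bare name is just a literal string that the framework
    # previously had to special-case, which is why the bare form is deprecated
    print("db_names")
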
diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 9ce05938b62..c58024e3158 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -69,8 +69,8 @@ - '"json" in item.1' - item.0.stat.checksum == item.1.content | checksum with_together: - - pass_checksum.results - - pass.results + - "{{pass_checksum.results}}" + - "{{pass.results}}" - name: checksum fail_json @@ -89,8 +89,8 @@ - item.0.stat.checksum == item.1.content | checksum - '"json" not in item.1' with_together: - - fail_checksum.results - - fail.results + - "{{fail_checksum.results}}" + - "{{fail.results}}" - name: test https fetch to a site with mismatched hostname and certificate uri: diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml index 3d29775894e..ff26117e64d 100644 --- a/test/integration/roles/test_win_copy/tasks/main.yml +++ b/test/integration/roles/test_win_copy/tasks/main.yml @@ -183,7 +183,7 @@ # assert: # that: # - "{{item.stat.mode}} == 0700" -# with_items: dir_stats.results +# with_items: "{{dir_stats.results}}" # errors on this aren't presently ignored so this test is commented out. But it would be nice to fix. diff --git a/test/integration/roles/test_win_file/tasks/main.yml b/test/integration/roles/test_win_file/tasks/main.yml index a8d6e92b3d3..b128715226a 100644 --- a/test/integration/roles/test_win_file/tasks/main.yml +++ b/test/integration/roles/test_win_file/tasks/main.yml @@ -183,7 +183,7 @@ that: - 'item.changed == true' # - 'item.state == "file"' - with_items: file16_result.results + with_items: "{{file16_result.results}}" #- name: try to force the sub-directory to a link # win_file: src={{win_output_dir}}/testing dest={{win_output_dir}}/sub1 state=link force=yes From 8e3cc3eecd7c09b8e5425543718fbbf235e00f8b Mon Sep 17 00:00:00 2001 From: Tom Paine <aioue@users.noreply.github.com> Date: Mon, 8 Feb 2016 19:19:05 +0000 Subject: [PATCH 0589/1113] Update profile_tasks.rst --- lib/ansible/plugins/callback/profile_tasks.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.rst b/lib/ansible/plugins/callback/profile_tasks.rst index 86d19e9bd4e..04fe3099256 100644 --- a/lib/ansible/plugins/callback/profile_tasks.rst +++ b/lib/ansible/plugins/callback/profile_tasks.rst @@ -11,7 +11,7 @@ Mashup of 2 excellent original works: Usage ----- -Add ``profile_taks`` to the ``callback_whitelist`` in ``ansible.cfg``. +Add ``profile_tasks`` to the ``callback_whitelist`` in ``ansible.cfg``. Run playbooks as normal. From b220051c149bd6b6b8a4d40587c45062d4fcf6de Mon Sep 17 00:00:00 2001 From: Jonathan Davila <jdavila@ansible.com> Date: Mon, 8 Feb 2016 16:33:22 -0500 Subject: [PATCH 0590/1113] Added more info to the no action detected error Error fix --- lib/ansible/parsing/mod_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index fbf5e1c3d61..0974630d8f9 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -301,7 +301,7 @@ class ModuleArgsParser: obj=self._task_ds) else: - raise AnsibleParserError("no action detected in task", obj=self._task_ds) + raise AnsibleParserError("no action detected in task. 
This often indicates a misspelled module name, or incorrect module path.", obj=self._task_ds) elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES: templar = Templar(loader=None) raw_params = args.pop('_raw_params') From 81a40ac235f36635b0435cef50b6ca8168087053 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 8 Feb 2016 23:00:19 -0500 Subject: [PATCH 0591/1113] fix winrm erorr formatting --- lib/ansible/plugins/connection/winrm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index 5d731c31dd8..e908a995419 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -151,7 +151,7 @@ class Connection(ConnectionBase): errors.append(u'%s: %s' % (transport, err_msg)) display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_unicode(traceback.format_exc())), host=self._winrm_host) if errors: - raise AnsibleError(', '.join(to_str(errors))) + raise AnsibleError(to_str(u', '.join((errors)))) else: raise AnsibleError('No transport found for WinRM connection') From 06b2400aae0aca05f1950aa31f97a53959f3967a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 8 Feb 2016 20:28:55 -0800 Subject: [PATCH 0592/1113] Need to apply to_str to each element of the list so that we don't mix types in the join() "Third time's the charm" --- lib/ansible/plugins/connection/winrm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index e908a995419..125e54cb9aa 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -151,7 +151,7 @@ class Connection(ConnectionBase): errors.append(u'%s: %s' % (transport, err_msg)) display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_unicode(traceback.format_exc())), host=self._winrm_host) if errors: - raise AnsibleError(to_str(u', '.join((errors)))) + raise AnsibleError(', '.join(map(to_str, errors))) else: raise AnsibleError('No transport found for WinRM connection') From 0f15e59cb28e8adbe17a6b6163ab469ba7e1eb1f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 8 Feb 2016 22:05:46 -0800 Subject: [PATCH 0593/1113] Also hide the before state of files with --diff and no_log --- lib/ansible/plugins/action/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index b81fd495619..cc2afdc8651 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -617,7 +617,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): diff['after_header'] = 'dynamically generated' diff['after'] = source - if self._play_context.no_log and 'after' in diff: - diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]" + if self._play_context.no_log: + if 'before' in diff: + diff["before"] = "" + if 'after' in diff: + diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]" return diff From 9f1eea43fa15aa6457e9b8bd099514ae8ada30d9 Mon Sep 17 00:00:00 2001 From: Kamil Szczygiel <kamil.szczygiel@intel.com> Date: Tue, 9 Feb 2016 08:49:26 +0100 Subject: [PATCH 0594/1113] support for python < 2.7 --- lib/ansible/module_utils/vmware.py | 31 
+++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index 6bba123f26d..2f895801466 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -116,26 +116,27 @@ def connect_to_api(module, disconnect_atexit=True): password = module.params['password'] validate_certs = module.params['validate_certs'] + if validate_certs and not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update python or or use validate_certs=false') + try: - if validate_certs: - service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) - else: - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) - context.verify_mode = ssl.CERT_NONE - service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) - - # Disabling atexit should be used in special cases only. - # Such as IP change of the ESXi host which removes the connection anyway. - # Also removal significantly speeds up the return of the module - - if disconnect_atexit: - atexit.register(connect.Disconnect, service_instance) - return service_instance.RetrieveContent() + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) except vim.fault.InvalidLogin, invalid_login: module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) except requests.ConnectionError, connection_error: - module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) + if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs: + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.verify_mode = ssl.CERT_NONE + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) + else: + module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) + # Disabling atexit should be used in special cases only. + # Such as IP change of the ESXi host which removes the connection anyway. 
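
The vmware change above guards against interpreters older than 2.7.9, where ``ssl.SSLContext`` does not exist and the verification mode cannot be adjusted, and only builds an unverified context as a fallback when validation is explicitly disabled. A condensed sketch of those two pieces, not the module itself::

    import ssl

    def check_ssl_support(validate_certs):
        # mirrors the guard above: without ssl.SSLContext there is no way
        # to change how certificates are verified
        if validate_certs and not hasattr(ssl, 'SSLContext'):
            raise RuntimeError('update Python or use validate_certs=false')

    def unverified_context():
        # used only when certificate validation has been turned off
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_NONE
        return context
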
+ # Also removal significantly speeds up the return of the module + if disconnect_atexit: + atexit.register(connect.Disconnect, service_instance) + return service_instance.RetrieveContent() def get_all_objs(content, vimtype): From 162dd2594ffd3208f8464b66ce541dc176a5a6bb Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 9 Feb 2016 12:34:15 -0500 Subject: [PATCH 0595/1113] Filter become success string from the stdout of script results Fixes #14390 --- lib/ansible/plugins/action/script.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index 5b0f324dfcf..f1ff2703efa 100644 --- a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -97,6 +97,7 @@ class ActionModule(ActionBase): if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES: self._remove_tmp_path(tmp) + result['stdout'] = self._strip_success_message(result.get('stdout', '')) result['changed'] = True return result From d36394d6a64ff9b394405c0cd53dcacb65404cd4 Mon Sep 17 00:00:00 2001 From: Matthew Stoltenberg <matthew.stoltenberg@viavisolutions.com> Date: Tue, 9 Feb 2016 09:18:09 -0700 Subject: [PATCH 0596/1113] strip BECOME-SUCCESS at lower level Fixes #14395 --- lib/ansible/plugins/action/__init__.py | 4 +++- lib/ansible/plugins/action/async.py | 4 ---- lib/ansible/plugins/action/raw.py | 5 ----- lib/ansible/plugins/action/script.py | 1 - 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index cc2afdc8651..78c7e809bb1 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -544,8 +544,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): if rc is None: rc = 0 - display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) + # be sure to remove the BECOME-SUCCESS message now + out = self._strip_success_message(out) + display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err) def _get_first_available_file(self, faf, of=None, searchdir='files'): diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 5e04f37ff12..8a7175aeb86 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -75,8 +75,4 @@ class ActionModule(ActionBase): result['changed'] = True - # be sure to strip out the BECOME-SUCCESS message, which may - # be there depending on the output of the module - result['stdout'] = self._strip_success_message(result.get('stdout', '')) - return result diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py index c9718db4135..a76e6d6c3c2 100644 --- a/lib/ansible/plugins/action/raw.py +++ b/lib/ansible/plugins/action/raw.py @@ -37,9 +37,4 @@ class ActionModule(ActionBase): executable = self._task.args.get('executable') result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable)) - # for some modules (script, raw), the sudo success key - # may leak into the stdout due to the way the sudo/su - # command is constructed, so we filter that out here - result['stdout'] = self._strip_success_message(result.get('stdout', '')) - return result diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index f1ff2703efa..5b0f324dfcf 100644 --- 
a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -97,7 +97,6 @@ class ActionModule(ActionBase): if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES: self._remove_tmp_path(tmp) - result['stdout'] = self._strip_success_message(result.get('stdout', '')) result['changed'] = True return result From a54f472b083ac50087dde31140455a10b7332d09 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 9 Feb 2016 13:30:50 -0500 Subject: [PATCH 0597/1113] Submodule pointer update for devel --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 009389bbb2e..9fc4ed04581 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 009389bbb2e472c938c63f16b5742dfb9862718b +Subproject commit 9fc4ed0458136a3da55f672f8ddefd738ad941b8 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index ff8806eaee8..6aeb2ab6cf5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit ff8806eaee8780db61598b7047d0f15787223175 +Subproject commit 6aeb2ab6cf54e0e3d309abedb2a9f164fb550914 From b4b24a0889ae52a4ee88e5754eef97ed258915a7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 5 Feb 2016 00:39:09 -0500 Subject: [PATCH 0598/1113] moved to base class for shell plugins fish now sets env vars correctly fish checksum now works fixed and cleaned up imports fixed typo --- lib/ansible/plugins/shell/__init__.py | 96 +++++++++++++++++++++++-- lib/ansible/plugins/shell/csh.py | 8 ++- lib/ansible/plugins/shell/fish.py | 59 ++++++++++++++- lib/ansible/plugins/shell/powershell.py | 2 - lib/ansible/plugins/shell/sh.py | 85 +--------------------- 5 files changed, 158 insertions(+), 92 deletions(-) diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py index 785fc459921..50530c6b691 100644 --- a/lib/ansible/plugins/shell/__init__.py +++ b/lib/ansible/plugins/shell/__init__.py @@ -1,6 +1,6 @@ -# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# (c) 2016 RedHat # -# This file is part of Ansible +# This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -14,8 +14,96 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
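
The refactor in this patch moves the shared command-building logic into a single ``ShellBase`` class and lets each shell plugin override only the syntax constants (and the occasional method) that differ. A toy version of that layout, unrelated to the real plugin API, shows the design choice::

    class ShellBase(object):
        # spelling of the shell operators; subclasses override as needed
        _SHELL_AND = '&&'
        _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'

        def remove(self, path, recurse=False):
            flags = '-rf' if recurse else '-f'
            return 'rm %s %s %s' % (flags, path, self._SHELL_REDIRECT_ALLNULL)

    class FishShell(ShellBase):
        # fish spells "and" differently but reuses everything else
        _SHELL_AND = '; and'

    print(ShellBase().remove('/tmp/scratch'))   # rm -f /tmp/scratch > /dev/null 2>&1
    print(FishShell()._SHELL_AND)               # ; and
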
- -# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import os +import re +import pipes +import ansible.constants as C +import time +import random + +from ansible.compat.six import text_type + +_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') + +class ShellBase(object): + + def __init__(self): + self.env = dict( + LANG = C.DEFAULT_MODULE_LANG, + LC_ALL = C.DEFAULT_MODULE_LANG, + LC_MESSAGES = C.DEFAULT_MODULE_LANG, + ) + + def env_prefix(self, **kwargs): + env = self.env.copy() + env.update(kwargs) + return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()]) + + def join_path(self, *args): + return os.path.join(*args) + + # some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say + def get_remote_filename(self, base_name): + return base_name.strip() + + def path_has_trailing_slash(self, path): + return path.endswith('/') + + def chmod(self, mode, path): + path = pipes.quote(path) + return 'chmod %s %s' % (mode, path) + + def remove(self, path, recurse=False): + path = pipes.quote(path) + cmd = 'rm -f ' + if recurse: + cmd += '-r ' + return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL) + + def mkdtemp(self, basefile=None, system=False, mode=None): + if not basefile: + basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) + basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) + if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')): + basetmp = self.join_path('/tmp', basefile) + cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) + cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) + + # change the umask in a subshell to achieve the desired mode + # also for directories created with `mkdir -p` + if mode: + tmp_umask = 0o777 & ~mode + cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT) + + return cmd + + def expand_user(self, user_home_path): + ''' Return a command to expand tildes in a path + + It can be either "~" or "~username". 
We use the POSIX definition of + a username: + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426 + http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276 + ''' + + # Check that the user_path to expand is safe + if user_home_path != '~': + if not _USER_HOME_PATH_RE.match(user_home_path): + # pipes.quote will make the shell return the string verbatim + user_home_path = pipes.quote(user_home_path) + return 'echo %s' % user_home_path + + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): + # don't quote the cmd if it's an empty string, because this will break pipelining mode + if cmd.strip() != '': + cmd = pipes.quote(cmd) + cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] + if arg_path is not None: + cmd_parts.append(arg_path) + new_cmd = " ".join(cmd_parts) + if rm_tmp: + new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL) + return new_cmd diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py index 6f1008be012..c4d6319dc5c 100644 --- a/lib/ansible/plugins/shell/csh.py +++ b/lib/ansible/plugins/shell/csh.py @@ -17,9 +17,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.plugins.shell.sh import ShellModule as ShModule +from ansible.plugins.shell import ShellBase -class ShellModule(ShModule): +class ShellModule(ShellBase): # Common shell filenames that this plugin handles COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh')) @@ -29,8 +29,12 @@ class ShellModule(ShModule): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\\\n' _SHELL_REDIRECT_ALLNULL = '>& /dev/null' + _SHELL_AND = '&&' + _SHELL_OR = '||' _SHELL_SUB_LEFT = '"`' _SHELL_SUB_RIGHT = '`"' + _SHELL_GROUP_LEFT = '(' + _SHELL_GROUP_RIGHT = ')' def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py index aee4cf0867b..ddee24ac6d3 100644 --- a/lib/ansible/plugins/shell/fish.py +++ b/lib/ansible/plugins/shell/fish.py @@ -17,7 +17,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import pipes from ansible.plugins.shell.sh import ShellModule as ShModule +from ansible.compat.six import text_type class ShellModule(ShModule): @@ -26,6 +28,8 @@ class ShellModule(ShModule): # Family of shells this has. 
Must match the filename without extension SHELL_FAMILY = 'fish' + _SHELL_EMBEDDED_PY_EOL = '\n' + _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1' _SHELL_AND = '; and' _SHELL_OR = '; or' _SHELL_SUB_LEFT = '(' @@ -34,4 +38,57 @@ class ShellModule(ShModule): _SHELL_GROUP_RIGHT = '' def env_prefix(self, **kwargs): - return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) + env = self.env.copy() + env.update(kwargs) + return ' '.join(['set -lx %s %s;' % (k, pipes.quote(text_type(v))) for k,v in env.items()]) + + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): + # don't quote the cmd if it's an empty string, because this will break pipelining mode + if cmd.strip() != '': + cmd = pipes.quote(cmd) + cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] + if arg_path is not None: + cmd_parts.append(arg_path) + new_cmd = " ".join(cmd_parts) + if rm_tmp: + new_cmd = 'begin ; %s; rm -rf "%s" %s ; end' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL) + return new_cmd + + def checksum(self, path, python_interp): + # The following test is fish-compliant. + # + # In the following test, each condition is a check and logical + # comparison (or or and) that sets the rc value. Every check is run so + # the last check in the series to fail will be the rc that is + # returned. + # + # If a check fails we error before invoking the hash functions because + # hash functions may successfully take the hash of a directory on BSDs + # (UFS filesystem?) which is not what the rest of the ansible code + # expects + # + # If all of the available hashing methods fail we fail with an rc of + # 0. This logic is added to the end of the cmd at the bottom of this + # function. + + # Return codes: + # checksum: success! + # 0: Unknown error + # 1: Remote file does not exist + # 2: No read permissions on the file + # 3: File is a directory + # 4: No python interpreter + + # Quoting gets complex here. We're writing a python string that's + # used by a variety of shells on the remote host to invoke a python + # "one-liner". 
+ shell_escaped_path = pipes.quote(path) + test = "set rc flag; [ -r %(p)s ] %(shell_or)s set rc 2; [ -f %(p)s ] %(shell_or)s set rc 1; [ -d %(p)s ] %(shell_and)s set rc 3; %(i)s -V 2>/dev/null %(shell_or)s set rc 4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"$rc \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) + csums = [ + u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) + u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 + ] + + cmd = (" %s " % self._SHELL_OR).join(csums) + cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path) + return cmd diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py index acde565e2fc..72f2570549f 100644 --- a/lib/ansible/plugins/shell/powershell.py +++ b/lib/ansible/plugins/shell/powershell.py @@ -20,9 +20,7 @@ __metaclass__ = type import base64 import os import re -import random import shlex -import time from ansible.utils.unicode import to_bytes, to_unicode diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 1e69665c0f7..671eb7139ec 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -17,18 +17,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import os -import re import pipes -import ansible.constants as C -import time -import random -from ansible.compat.six import text_type +from ansible.plugins.shell import ShellBase -_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') -class ShellModule(object): +class ShellModule(ShellBase): # Common shell filenames that this plugin handles. 
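
The ``checksum()`` command assembled above embeds a Python "one-liner" that hashes the remote file in fixed-size blocks, so large files are never read into memory at once. Unrolled into an ordinary function, the hashing part of that one-liner is roughly::

    import hashlib

    def sha1sum(path, blocksize=65536):
        hasher = hashlib.sha1()
        with open(path, 'rb') as afile:
            buf = afile.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(blocksize)
        return hasher.hexdigest()
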
# Note: sh is the default shell plugin so this plugin may also be selected @@ -47,69 +41,6 @@ class ShellModule(object): _SHELL_GROUP_LEFT = '(' _SHELL_GROUP_RIGHT = ')' - def env_prefix(self, **kwargs): - '''Build command prefix with environment variables.''' - env = dict( - LANG = C.DEFAULT_MODULE_LANG, - LC_ALL = C.DEFAULT_MODULE_LANG, - LC_MESSAGES = C.DEFAULT_MODULE_LANG, - ) - env.update(kwargs) - return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()]) - - def join_path(self, *args): - return os.path.join(*args) - - # some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say - def get_remote_filename(self, base_name): - return base_name.strip() - - def path_has_trailing_slash(self, path): - return path.endswith('/') - - def chmod(self, mode, path): - path = pipes.quote(path) - return 'chmod %s %s' % (mode, path) - - def remove(self, path, recurse=False): - path = pipes.quote(path) - cmd = 'rm -f ' - if recurse: - cmd += '-r ' - return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL) - - def mkdtemp(self, basefile=None, system=False, mode=None): - if not basefile: - basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) - basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) - if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')): - basetmp = self.join_path('/tmp', basefile) - cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) - cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) - - # change the umask in a subshell to achieve the desired mode - # also for directories created with `mkdir -p` - if mode: - tmp_umask = 0o777 & ~mode - cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT) - - return cmd - - def expand_user(self, user_home_path): - ''' Return a command to expand tildes in a path - - It can be either "~" or "~username". We use the POSIX definition of - a username: - http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426 - http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276 - ''' - - # Check that the user_path to expand is safe - if user_home_path != '~': - if not _USER_HOME_PATH_RE.match(user_home_path): - # pipes.quote will make the shell return the string verbatim - user_home_path = pipes.quote(user_home_path) - return 'echo %s' % user_home_path def checksum(self, path, python_interp): # The following test needs to be SH-compliant. 
BASH-isms will @@ -151,15 +82,3 @@ class ShellModule(object): cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path) return cmd - def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): - # don't quote the cmd if it's an empty string, because this will - # break pipelining mode - if cmd.strip() != '': - cmd = pipes.quote(cmd) - cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] - if arg_path is not None: - cmd_parts.append(arg_path) - new_cmd = " ".join(cmd_parts) - if rm_tmp: - new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL) - return new_cmd From 1b8dec9c886ba3584ef31ad798133b7114a8583d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 9 Feb 2016 18:10:36 -0500 Subject: [PATCH 0599/1113] avoid termination message when term is internal --- lib/ansible/cli/adhoc.py | 3 ++- lib/ansible/executor/playbook_executor.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index faacd6c67bb..4cba2be16ca 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -91,7 +91,8 @@ class AdHocCLI(CLI): def _terminate(self, signum=None, framenum=None): if signum is not None: - raise SystemExit("Interrupt detected, shutting down gracefully") + display.debug("Termination signal detected, shutting down gracefully") + raise SystemExit def run(self): ''' use Runner lib to do SSH things ''' diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index c1a1303a9c7..83449f14352 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -208,8 +208,8 @@ class PlaybookExecutor: return result def _terminate(self, signum=None, framenum=None): - display.debug(framenum) - raise SystemExit("Terminating run due to external signal") + display.debug("Termination signal detected, shutting down gracefully") + raise SystemExit def _get_serialized_batches(self, play): ''' From 5a88478ccca44c1b0d531a35e91e88f834db9210 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 9 Feb 2016 18:21:33 -0500 Subject: [PATCH 0600/1113] centralized TERM signal handling --- lib/ansible/cli/__init__.py | 9 +++++++++ lib/ansible/cli/adhoc.py | 8 -------- lib/ansible/executor/playbook_executor.py | 7 ------- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index ed4a2dd5db8..bfe4f81b9f6 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -27,6 +27,7 @@ import time import yaml import re import getpass +import signal import subprocess from ansible import __version__ @@ -77,6 +78,11 @@ class CLI(object): self.action = None self.callback = callback + def _terminate(self, signum=None, framenum=None): + if signum is not None: + display.debug("Termination signal detected, shutting down gracefully: %d" % os.getpid() ) + raise SystemExit + def set_action(self): """ Get the action the user wants to execute from the sys argv list. 
@@ -109,6 +115,9 @@ class CLI(object): else: display.display(u"No config file found; using defaults") + # Manage user interruptions + signal.signal(signal.SIGTERM, self._terminate) + @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): ''' prompt for vault password and/or password change ''' diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 4cba2be16ca..cbd0ca09d83 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -21,7 +21,6 @@ __metaclass__ = type ######################################################## import os -import signal from ansible import constants as C from ansible.cli import CLI @@ -89,10 +88,6 @@ class AdHocCLI(CLI): tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args)), async=async, poll=poll) ] ) - def _terminate(self, signum=None, framenum=None): - if signum is not None: - display.debug("Termination signal detected, shutting down gracefully") - raise SystemExit def run(self): ''' use Runner lib to do SSH things ''' @@ -176,9 +171,6 @@ class AdHocCLI(CLI): # now create a task queue manager to execute the play self._tqm = None try: - # Manage user interruptions - signal.signal(signal.SIGTERM, self._terminate) - self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 83449f14352..19efe19f291 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -22,7 +22,6 @@ __metaclass__ = type import getpass import locale import os -import signal import sys from ansible.compat.six import string_types @@ -69,8 +68,6 @@ class PlaybookExecutor: may limit the runs to serialized groups, etc. ''' - signal.signal(signal.SIGTERM, self._terminate) - result = 0 entrylist = [] entry = {} @@ -207,10 +204,6 @@ class PlaybookExecutor: return result - def _terminate(self, signum=None, framenum=None): - display.debug("Termination signal detected, shutting down gracefully") - raise SystemExit - def _get_serialized_batches(self, play): ''' Returns a list of hosts, subdivided into batches based on From 2adddac94c40038853819954f3cefd6bffc431dd Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 10 Feb 2016 09:22:57 -0500 Subject: [PATCH 0601/1113] Catch exceptions during module execution so they don't fail the worker Fixes #14120 --- lib/ansible/executor/task_executor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 6350cc2b0e3..4b7069cb7a3 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -24,6 +24,7 @@ import json import subprocess import sys import time +import traceback from ansible.compat.six import iteritems, string_types @@ -140,6 +141,8 @@ class TaskExecutor: return res except AnsibleError as e: return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr')) + except Exception as e: + return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_unicode(traceback.format_exc()), stdout='') finally: try: self._connection.close() From d9dcb2a427f4041f121950668c4ae40b0042c0d6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 10 Feb 2016 09:48:22 -0500 Subject: [PATCH 0602/1113] Revert "centralized TERM signal handling" This reverts commit 5a88478ccca44c1b0d531a35e91e88f834db9210. 
is WIP, not ready for use yet --- lib/ansible/cli/__init__.py | 9 --------- lib/ansible/cli/adhoc.py | 8 ++++++++ lib/ansible/executor/playbook_executor.py | 7 +++++++ 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index bfe4f81b9f6..ed4a2dd5db8 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -27,7 +27,6 @@ import time import yaml import re import getpass -import signal import subprocess from ansible import __version__ @@ -78,11 +77,6 @@ class CLI(object): self.action = None self.callback = callback - def _terminate(self, signum=None, framenum=None): - if signum is not None: - display.debug("Termination signal detected, shutting down gracefully: %d" % os.getpid() ) - raise SystemExit - def set_action(self): """ Get the action the user wants to execute from the sys argv list. @@ -115,9 +109,6 @@ class CLI(object): else: display.display(u"No config file found; using defaults") - # Manage user interruptions - signal.signal(signal.SIGTERM, self._terminate) - @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): ''' prompt for vault password and/or password change ''' diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index cbd0ca09d83..4cba2be16ca 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -21,6 +21,7 @@ __metaclass__ = type ######################################################## import os +import signal from ansible import constants as C from ansible.cli import CLI @@ -88,6 +89,10 @@ class AdHocCLI(CLI): tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args)), async=async, poll=poll) ] ) + def _terminate(self, signum=None, framenum=None): + if signum is not None: + display.debug("Termination signal detected, shutting down gracefully") + raise SystemExit def run(self): ''' use Runner lib to do SSH things ''' @@ -171,6 +176,9 @@ class AdHocCLI(CLI): # now create a task queue manager to execute the play self._tqm = None try: + # Manage user interruptions + signal.signal(signal.SIGTERM, self._terminate) + self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 19efe19f291..83449f14352 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -22,6 +22,7 @@ __metaclass__ = type import getpass import locale import os +import signal import sys from ansible.compat.six import string_types @@ -68,6 +69,8 @@ class PlaybookExecutor: may limit the runs to serialized groups, etc. 
''' + signal.signal(signal.SIGTERM, self._terminate) + result = 0 entrylist = [] entry = {} @@ -204,6 +207,10 @@ class PlaybookExecutor: return result + def _terminate(self, signum=None, framenum=None): + display.debug("Termination signal detected, shutting down gracefully") + raise SystemExit + def _get_serialized_batches(self, play): ''' Returns a list of hosts, subdivided into batches based on From 486304ba1cc00eee837a0b4af69891d1070882ac Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 10 Feb 2016 08:45:29 -0800 Subject: [PATCH 0603/1113] Clarify error message when module replacer encounters a bad import line for module_utils code --- lib/ansible/executor/module_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index ba32273256f..f0e307cba0b 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -97,7 +97,7 @@ def _find_snippet_imports(module_data, module_path, strip_comments): if " import *" not in line: import_error = True if import_error: - raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path) + raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.<lib name> import *'" % module_path) snippet_name = tokens[2].split()[0] snippet_names.append(snippet_name) output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py"))) From 3079a0377354b7dbcdac0ab25b9f0be2fc7d128d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 10 Feb 2016 12:05:45 -0500 Subject: [PATCH 0604/1113] read full file when doing diff but avoid reading file at all or full file when file is too big for diffing --- lib/ansible/plugins/action/__init__.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 78c7e809bb1..9a149895a36 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -600,20 +600,21 @@ class ActionBase(with_metaclass(ABCMeta, object)): diff['before'] = dest_contents if source_file: - display.debug("Reading local copy of the file %s" % source) - try: - src = open(source) - src_contents = src.read(8192) - st = os.stat(source) - except Exception as e: - raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e))) - if "\x00" in src_contents: - diff['src_binary'] = 1 - elif st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF: + st = os.stat(source) + if st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF: diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF else: - diff['after_header'] = source - diff['after'] = src_contents + display.debug("Reading local copy of the file %s" % source) + try: + src = open(source) + src_contents = src.read() + except Exception as e: + raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e))) + if "\x00" in src_contents: + diff['src_binary'] = 1 + else: + diff['after_header'] = source + diff['after'] = src_contents else: display.debug("source of file passed in") diff['after_header'] = 'dynamically generated' From 38120c10754efdffb6bf55a9707167962149b486 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 10 Feb 2016 09:48:05 -0500 Subject: [PATCH 0605/1113] termination handling - moved to base 
cli class to handle centrally and duplicate less code - now avoids duplication and reiteration of signal handler by reassigning it - left note on how to do non-graceful in case we add in future as I won't remember everything i did here and don't want to 'relearn' it. --- lib/ansible/cli/__init__.py | 20 +++++++++++++++++++- lib/ansible/cli/adhoc.py | 9 --------- lib/ansible/executor/playbook_executor.py | 12 ------------ 3 files changed, 19 insertions(+), 22 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index ed4a2dd5db8..0ba370f2734 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -27,6 +27,7 @@ import time import yaml import re import getpass +import signal import subprocess from ansible import __version__ @@ -44,7 +45,7 @@ except ImportError: class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' - # TODO: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog + #FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog def format_help(self, formatter=None, epilog=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) @@ -77,6 +78,20 @@ class CLI(object): self.action = None self.callback = callback + def _terminate(self, signum=None, framenum=None): + if signum == signal.SIGTERM: + if hasattr(os, 'getppid'): + display.debug("Termination requested in parent, shutting down gracefully") + signal.signal(signal.SIGTERM, signal.SIG_DFL) + else: + display.debug("Term signal in child, harakiri!") + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + raise SystemExit + + #NOTE: if ever want to make this immediately kill children use on parent: + #os.killpg(os.getpgid(0), signal.SIGTERM) + def set_action(self): """ Get the action the user wants to execute from the sys argv list. 
@@ -109,6 +124,9 @@ class CLI(object): else: display.display(u"No config file found; using defaults") + # Manage user interruptions + signal.signal(signal.SIGTERM, self._terminate) + @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): ''' prompt for vault password and/or password change ''' diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 4cba2be16ca..7a3e208f368 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -21,7 +21,6 @@ __metaclass__ = type ######################################################## import os -import signal from ansible import constants as C from ansible.cli import CLI @@ -89,11 +88,6 @@ class AdHocCLI(CLI): tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args)), async=async, poll=poll) ] ) - def _terminate(self, signum=None, framenum=None): - if signum is not None: - display.debug("Termination signal detected, shutting down gracefully") - raise SystemExit - def run(self): ''' use Runner lib to do SSH things ''' @@ -176,9 +170,6 @@ class AdHocCLI(CLI): # now create a task queue manager to execute the play self._tqm = None try: - # Manage user interruptions - signal.signal(signal.SIGTERM, self._terminate) - self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 83449f14352..ce91b7f6025 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -19,11 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import getpass -import locale import os -import signal -import sys from ansible.compat.six import string_types @@ -32,8 +28,6 @@ from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook from ansible.template import Templar -from ansible.utils.unicode import to_unicode - try: from __main__ import display except ImportError: @@ -69,8 +63,6 @@ class PlaybookExecutor: may limit the runs to serialized groups, etc. ''' - signal.signal(signal.SIGTERM, self._terminate) - result = 0 entrylist = [] entry = {} @@ -207,10 +199,6 @@ class PlaybookExecutor: return result - def _terminate(self, signum=None, framenum=None): - display.debug("Termination signal detected, shutting down gracefully") - raise SystemExit - def _get_serialized_batches(self, play): ''' Returns a list of hosts, subdivided into batches based on From 62765858825173dd532827fe4c1dfb246eb8db47 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 10 Feb 2016 10:51:12 -0800 Subject: [PATCH 0606/1113] Module params should default to str in most cases. 
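For illustration, a minimal standalone sketch of the behaviour this patch introduces: argument_spec entries with no explicit type are now coerced to str (unless the value is an explicit None), and a new 'raw' type opts out of coercion entirely (used for mode in FILE_COMMON_ARGUMENTS). The function name coerce_param and the asserts below are hypothetical, not the actual AnsibleModule code:

    # Illustrative only: mimics the post-patch coercion rules for
    # argument_spec entries whose 'type' is left unspecified.
    def coerce_param(spec_entry, value):
        wanted = spec_entry.get('type')
        if wanted == 'raw':
            return value                  # 'raw' passes the value through untouched
        if wanted is None:
            if value is None:
                return None               # explicit None still means "leave unset"
            wanted = 'str'                # everything else now defaults to str
        if wanted == 'str':
            return value if isinstance(value, str) else str(value)
        raise ValueError('type not covered by this sketch: %s' % wanted)

    # An integer file mode such as 0o644 (420 decimal) would be stringified to
    # '420' under the str default, which is presumably why mode= is given
    # type='raw' in this patch.
    assert coerce_param({}, 0o644) == '420'
    assert coerce_param({'type': 'raw'}, 0o644) == 420
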
--- lib/ansible/module_utils/basic.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 4bb376c2882..3c14af7fbbb 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -226,7 +226,7 @@ except ImportError: FILE_COMMON_ARGUMENTS=dict( src = dict(), - mode = dict(), + mode = dict(type='raw'), owner = dict(), group = dict(), seuser = dict(), @@ -574,6 +574,7 @@ class AnsibleModule(object): 'int': self._check_type_int, 'float': self._check_type_float, 'path': self._check_type_path, + 'raw': self._check_type_raw, } if not bypass_checks: self._check_required_arguments() @@ -1360,15 +1361,23 @@ class AnsibleModule(object): value = self._check_type_str(value) return os.path.expanduser(os.path.expandvars(value)) + def _check_type_raw(self, value): + return value + def _check_argument_types(self): ''' ensure all arguments have the requested type ''' for (k, v) in self.argument_spec.items(): wanted = v.get('type', None) - if wanted is None: - continue if k not in self.params: continue + if wanted is None: + # Mostly we want to default to str. + # For values set to None explicitly, return None instead as + # that allows a user to unset a parameter + if self.params[k] is None: + continue + wanted = 'str' value = self.params[k] From f450a4cb69036c928836c8f4098c2834b20cf4a8 Mon Sep 17 00:00:00 2001 From: Shawn Silva <shawn.l.silva@gmail.com> Date: Wed, 10 Feb 2016 18:54:52 -0500 Subject: [PATCH 0607/1113] When the linode inventory is generated the linode label is used as the inventory host. If the label isn't a FQDN ansible can't connect. This will set the hostvars for the ansible_ssh_host to the linodes public IP. --- contrib/inventory/linode.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/inventory/linode.py b/contrib/inventory/linode.py index f2b61b70756..0aa7098b316 100755 --- a/contrib/inventory/linode.py +++ b/contrib/inventory/linode.py @@ -280,6 +280,11 @@ class LinodeInventory(object): node_vars["datacenter_city"] = self.get_datacenter_city(node) node_vars["public_ip"] = [addr.address for addr in node.ipaddresses if addr.is_public][0] + # Set the SSH host information, so these inventory items can be used if + # their labels aren't FQDNs + node_vars['ansible_ssh_host'] = node_vars["public_ip"] + node_vars['ansible_host'] = node_vars["public_ip"] + private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public] if private_ips: From 5a45ca8bb12678861da0448363597bd6e004cfc7 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Tue, 9 Feb 2016 14:59:44 -0500 Subject: [PATCH 0608/1113] Move Conditional class to netcfg. Added error handling for connect and execute methods. 
Fix comments --- lib/ansible/module_utils/eos.py | 11 ++- lib/ansible/module_utils/ios.py | 15 ++- lib/ansible/module_utils/netcfg.py | 141 +++++++++++++++++++++++------ lib/ansible/module_utils/nxos.py | 14 ++- 4 files changed, 147 insertions(+), 34 deletions(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index c5446dc5394..5e0dfefc353 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -38,6 +38,7 @@ def to_list(val): else: return list() + class Eapi(object): def __init__(self, module): @@ -107,6 +108,7 @@ class Eapi(object): return response['result'] + class Cli(object): def __init__(self, module): @@ -121,15 +123,20 @@ class Cli(object): password = self.module.params['password'] self.shell = Shell() - self.shell.open(host, port=port, username=username, password=password) + + try: + self.shell.open(host, port=port, username=username, password=password) + except Exception, exc: + self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) def authorize(self): passwd = self.module.params['auth_pass'] self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd)) - def send(self, commands, encoding='text'): + def send(self, commands): return self.shell.send(commands) + class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index 95937ca2191..3340213fa81 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -29,6 +29,7 @@ NET_COMMON_ARGS = dict( provider=dict() ) + def to_list(val): if isinstance(val, (list, tuple)): return list(val) @@ -37,6 +38,7 @@ def to_list(val): else: return list() + class Cli(object): def __init__(self, module): @@ -51,7 +53,11 @@ class Cli(object): password = self.module.params['password'] self.shell = Shell() - self.shell.open(host, port=port, username=username, password=password) + + try: + self.shell.open(host, port=port, username=username, password=password) + except Exception, exc: + self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) def authorize(self): passwd = self.module.params['auth_pass'] @@ -60,6 +66,7 @@ class Cli(object): def send(self, commands): return self.shell.send(commands) + class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): @@ -101,7 +108,10 @@ class NetworkModule(AnsibleModule): return responses def execute(self, commands, **kwargs): - return self.connection.send(commands) + try: + return self.connection.send(commands, **kwargs) + except Exception, exc: + self.fail_json(msg=exc.message, commands=commands) def disconnect(self): self.connection.close() @@ -115,6 +125,7 @@ class NetworkModule(AnsibleModule): cmd += ' all' return self.execute(cmd)[0] + def get_module(**kwargs): """Return instance of NetworkModule """ diff --git a/lib/ansible/module_utils/netcfg.py b/lib/ansible/module_utils/netcfg.py index afd8be3a56f..644a0a32dfa 100644 --- a/lib/ansible/module_utils/netcfg.py +++ b/lib/ansible/module_utils/netcfg.py @@ -38,48 +38,133 @@ class ConfigLine(object): def __ne__(self, other): return not self.__eq__(other) + def parse(lines, indent): - toplevel = re.compile(r'\S') - childline = re.compile(r'^\s*(.+)$') - repl = r'([{|}|;])' + toplevel = re.compile(r'\S') + childline = re.compile(r'^\s*(.+)$') + repl = r'([{|}|;])' - ancestors = list() - config = list() + ancestors = list() + config = list() - for line in str(lines).split('\n'): - text = 
str(re.sub(repl, '', line)).strip() + for line in str(lines).split('\n'): + text = str(re.sub(repl, '', line)).strip() - cfg = ConfigLine(text) - cfg.raw = line + cfg = ConfigLine(text) + cfg.raw = line - if not text or text[0] in ['!', '#']: + if not text or text[0] in ['!', '#']: + continue + + # handle top level commands + if toplevel.match(line): + ancestors = [cfg] + + # handle sub level commands + else: + match = childline.match(line) + line_indent = match.start(1) + level = int(line_indent / indent) + parent_level = level - 1 + + cfg.parents = ancestors[:level] + + if level > len(ancestors): + config.append(cfg) continue - # handle top level commands - if toplevel.match(line): - ancestors = [cfg] + for i in range(level, len(ancestors)): + ancestors.pop() - # handle sub level commands + ancestors.append(cfg) + ancestors[parent_level].children.append(cfg) + + config.append(cfg) + + return config + + +class Conditional(object): + ''' + Used in command modules to evaluate waitfor conditions + ''' + + OPERATORS = { + 'eq': ['eq', '=='], + 'neq': ['neq', 'ne', '!='], + 'gt': ['gt', '>'], + 'ge': ['ge', '>='], + 'lt': ['lt', '<'], + 'le': ['le', '<='], + 'contains': ['contains'] + } + + def __init__(self, conditional): + self.raw = conditional + + key, op, val = shlex.split(conditional) + self.key = key + self.func = self.func(op) + self.value = self._cast_value(val) + + def __call__(self, data): + try: + value = self.get_value(dict(result=data)) + return self.func(value) + except Exception: + raise ValueError(self.key) + + def _cast_value(self, value): + if value in BOOLEANS_TRUE: + return True + elif value in BOOLEANS_FALSE: + return False + elif re.match(r'^\d+\.d+$', value): + return float(value) + elif re.match(r'^\d+$', value): + return int(value) + else: + return unicode(value) + + def func(self, oper): + for func, operators in self.OPERATORS.items(): + if oper in operators: + return getattr(self, func) + raise AttributeError('unknown operator: %s' % oper) + + def get_value(self, result): + for key in self.key.split('.'): + match = re.match(r'^(.+)\[(\d+)\]', key) + if match: + key, index = match.groups() + result = result[key][int(index)] else: - match = childline.match(line) - line_indent = match.start(1) - level = int(line_indent / indent) - parent_level = level - 1 + result = result.get(key) + return result - cfg.parents = ancestors[:level] + def number(self, value): + if '.' 
in str(value): + return float(value) + else: + return int(value) - if level > len(ancestors): - config.append(cfg) - continue + def eq(self, value): + return value == self.value - for i in range(level, len(ancestors)): - ancestors.pop() + def neq(self, value): + return value != self.value - ancestors.append(cfg) - ancestors[parent_level].children.append(cfg) + def gt(self, value): + return self.number(value) > self.value - config.append(cfg) + def ge(self, value): + return self.number(value) >= self.value - return config + def lt(self, value): + return self.number(value) < self.value + def le(self, value): + return self.number(value) <= self.value + def contains(self, value): + return self.value in value diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index 3c8837e0968..8c681830a6d 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -128,6 +128,7 @@ class Nxapi(object): return result + class Cli(object): def __init__(self, module): @@ -142,11 +143,16 @@ class Cli(object): password = self.module.params['password'] self.shell = Shell() - self.shell.open(host, port=port, username=username, password=password) + + try: + self.shell.open(host, port=port, username=username, password=password) + except Exception, exc: + self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) def send(self, commands, encoding='text'): return self.shell.send(commands) + class NetworkModule(AnsibleModule): def __init__(self, *args, **kwargs): @@ -190,7 +196,10 @@ class NetworkModule(AnsibleModule): return responses def execute(self, commands, **kwargs): - return self.connection.send(commands, **kwargs) + try: + return self.connection.send(commands, **kwargs) + except Exception, exc: + self.fail_json(msg=exc.message, commands=commands) def disconnect(self): self.connection.close() @@ -206,6 +215,7 @@ class NetworkModule(AnsibleModule): response = self.execute(cmd) return response[0] + def get_module(**kwargs): """Return instance of NetworkModule """ From 9c36c0aa80c041ca58595bc4efd37d813c876ca7 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 11 Feb 2016 07:11:36 -0500 Subject: [PATCH 0609/1113] minor bug fixes and updates to shell --- lib/ansible/module_utils/shell.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index 13506c43226..9a63c9821d2 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -44,6 +44,8 @@ CLI_ERRORS_RE = [ re.compile(r"connection timed out", re.I), re.compile(r"[^\r\n]+ not found", re.I), re.compile(r"'[^']' +returned error code: ?\d+"), + re.compile(r"syntax error"), + re.compile(r"unknown command") ] def to_list(val): @@ -77,6 +79,8 @@ class Shell(object): self.ssh = None self.shell = None + self._matched_prompt = None + self.prompts = list() self.prompts.extend(CLI_PROMPTS_RE) @@ -169,7 +173,9 @@ class Shell(object): raise ShellError('%s' % response) for regex in self.prompts: - if regex.search(response): + match = regex.search(response) + if match: + self._matched_prompt = match.group() return True def get_cli_connection(module): From 8f9badb2b49b6c8a7105ca674b80aab3f16773ea Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 8 Feb 2016 08:12:09 -0500 Subject: [PATCH 0610/1113] update shared module junos to handle root logins --- lib/ansible/module_utils/junos.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/junos.py b/lib/ansible/module_utils/junos.py index 33af9266e72..4912d008ff0 100644 --- a/lib/ansible/module_utils/junos.py +++ b/lib/ansible/module_utils/junos.py @@ -77,7 +77,8 @@ class NetworkModule(AnsibleModule): def connect(self): self.connection = Cli(self) self.connection.connect() - self.execute('cli') + if self.connection.shell._matched_prompt.strip().endswith('%'): + self.execute('cli') self.execute('set cli screen-length 0') def configure(self, commands): From cf251258a8b2a1e6befd6e72f4691cf450d70cb2 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 11 Feb 2016 08:26:38 -0500 Subject: [PATCH 0611/1113] initial add of new action plugin junos_template This adds a new action plugin, junos_template that allows the the junos_template module to perform the templating function. It implements net_template --- lib/ansible/plugins/action/junos_template.py | 28 ++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 lib/ansible/plugins/action/junos_template.py diff --git a/lib/ansible/plugins/action/junos_template.py b/lib/ansible/plugins/action/junos_template.py new file mode 100644 index 00000000000..5334b644d32 --- /dev/null +++ b/lib/ansible/plugins/action/junos_template.py @@ -0,0 +1,28 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + pass + + From f014b548e4b0bd30b0203bf3d6f52b327ac3eeba Mon Sep 17 00:00:00 2001 From: Timothy Hopper <tdhopper@users.noreply.github.com> Date: Thu, 11 Feb 2016 10:34:39 -0500 Subject: [PATCH 0612/1113] Link to /videos instead of the less obvious /resources --- docsite/rst/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/quickstart.rst b/docsite/rst/quickstart.rst index 055e4aecabb..1f6957b4336 100644 --- a/docsite/rst/quickstart.rst +++ b/docsite/rst/quickstart.rst @@ -3,7 +3,7 @@ Quickstart Video We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation. -The `quickstart video <http://ansible.com/resources>`_ is about 30 minutes long and will show you some of the basics about your +The `quickstart video <http://www.ansible.com/videos>`_ is about 30 minutes long and will show you some of the basics about your first steps with Ansible. Enjoy, and be sure to visit the rest of the documentation to learn more. 
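The Conditional class added to module_utils/netcfg.py a few patches above evaluates waitfor-style expressions of the form "key operator value" (split with shlex) against command responses, supporting dotted keys with [N] indexes and operators such as eq, neq, gt and contains. As a rough, self-contained sketch of the same idea -- simplified, and with hypothetical names (evaluate, OPERATORS) rather than the actual module_utils code:

    import re
    import shlex

    # Simplified stand-in for the waitfor conditional evaluator described above;
    # the operator set and key syntax mirror the patch, the names do not.
    OPERATORS = {
        'eq': lambda a, b: a == b,
        'neq': lambda a, b: a != b,
        'gt': lambda a, b: float(a) > float(b),
        'lt': lambda a, b: float(a) < float(b),
        'contains': lambda a, b: b in a,
    }

    def evaluate(conditional, responses):
        # e.g. evaluate("result[0] contains Ethernet", list_of_command_output)
        key, op, value = shlex.split(conditional)
        data = dict(result=responses)
        for part in key.split('.'):           # walk dotted keys, honouring [N] indexes
            match = re.match(r'^(.+)\[(\d+)\]$', part)
            if match:
                name, index = match.groups()
                data = data[name][int(index)]
            else:
                data = data.get(part)
        return OPERATORS[op](data, value)

    responses = ['interface Ethernet1\n  description uplink', 'hostname switch01']
    assert evaluate('result[0] contains Ethernet', responses) is True
    assert evaluate('result[1] eq switch01', responses) is False   # whole string differs

The real class additionally casts numeric and boolean values before comparing, which this sketch leaves out for brevity.
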
From a8aa5ff4eb7eb6c981f3446be508d6da3b5c7a68 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 11 Feb 2016 10:34:31 -0500 Subject: [PATCH 0613/1113] fix cartesian lookup it seems that this was not working in 1.9 but we swallowed up the error fixes #14437 --- lib/ansible/plugins/lookup/cartesian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/cartesian.py b/lib/ansible/plugins/lookup/cartesian.py index ce31ef9700b..75d3a0735a8 100644 --- a/lib/ansible/plugins/lookup/cartesian.py +++ b/lib/ansible/plugins/lookup/cartesian.py @@ -50,5 +50,5 @@ class LookupModule(LookupBase): if len(my_list) == 0: raise AnsibleError("with_cartesian requires at least one element in each list") - return [self._flatten(x) for x in product(*my_list, fillvalue=None)] + return [self._flatten(x) for x in product(*my_list)] From bb155e11423a2d603c6b7ed4b67e8c13e6013fb9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 11 Feb 2016 10:46:44 -0500 Subject: [PATCH 0614/1113] added test for cartesian lookup --- .../roles/test_lookups/tasks/main.yml | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 3c5e066ee34..5b179690f11 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -194,3 +194,23 @@ - assert: that: - "'www.kennethreitz.org' in web_data" + +- name: Test cartesian lookup + debug: var={{item}} + with_cartesian: + - ["A", "B", "C"] + - ["1", "2", "3"] + register: product + +- name: Verify cartesian lookup + assert: + that: + - product.results[0]['item'] == ["A", "1"] + - product.results[1]['item'] == ["A", "2"] + - product.results[2]['item'] == ["A", "3"] + - product.results[3]['item'] == ["B", "1"] + - product.results[4]['item'] == ["B", "2"] + - product.results[5]['item'] == ["B", "3"] + - product.results[6]['item'] == ["C", "1"] + - product.results[7]['item'] == ["C", "2"] + - product.results[8]['item'] == ["C", "3"] From 347b28252a407f106320999899aeb6bbf8a1b7fe Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 11 Feb 2016 14:14:37 -0500 Subject: [PATCH 0615/1113] allow skipping tasks due to undefined var mimic 1.x behaviour but give out big deprecation message, not only for missing attribute but any undefined error. --- lib/ansible/executor/task_executor.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4b7069cb7a3..2af5908a275 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -178,21 +178,15 @@ class TaskExecutor: # first_found loops are special. If the item is undefined # then we want to fall through to the next value rather # than failing. 
- loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, - loader=self._loader, fail_on_undefined=False, convert_bare=True) + loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=True) loop_terms = [t for t in loop_terms if not templar._contains_vars(t)] else: try: - loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, - loader=self._loader, fail_on_undefined=True, convert_bare=True) + loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: - if u'has no attribute' in to_unicode(e): - loop_terms = [] - display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.") - else: - raise - items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, - templar=templar).run(terms=loop_terms, variables=self._job_vars) + loop_terms = [] + display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.") + items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) From effa64383cf866d10700f8ed70b2a23ed80c6a88 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 11 Feb 2016 14:54:18 -0500 Subject: [PATCH 0616/1113] removed signal trapping --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 0ba370f2734..fc69af6029b 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -125,7 +125,7 @@ class CLI(object): display.display(u"No config file found; using defaults") # Manage user interruptions - signal.signal(signal.SIGTERM, self._terminate) + #signal.signal(signal.SIGTERM, self._terminate) @staticmethod def ask_vault_passwords(ask_new_vault_pass=False, rekey=False): From 0792983ec8c19cc8341525dbd6a21a0446867bab Mon Sep 17 00:00:00 2001 From: Marc Poirier <user.name> Date: Thu, 11 Feb 2016 15:29:48 -0500 Subject: [PATCH 0617/1113] Display the string which is causing an exception to be raised. In the ansible template module, when there is an error while expanding a templated string, displaying the string causing the exception is very useful. --- lib/ansible/template/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index fdc8eba720e..bec34f81aa7 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -464,7 +464,7 @@ class Templar: try: t = myenv.from_string(data) except TemplateSyntaxError as e: - raise AnsibleError("template error while templating string: %s" % str(e)) + raise AnsibleError("template error while templating string: %s. String: %s" % (str(e), data)) except Exception as e: if 'recursion' in str(e): raise AnsibleError("recursive loop detected in template string: %s" % data) From 99c4459224ccc94d086b993695ceb401ab0fe9dc Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Thu, 11 Feb 2016 15:51:53 -0500 Subject: [PATCH 0618/1113] Initial checkin of new roadmap file. 
Currently encompasses 2.1 roadmap and some community objectives. --- ROADMAP.md | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 ROADMAP.md diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 00000000000..51a16bd7b7d --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,98 @@ +Ansible Roadmap +============= +This document is now the location for published Ansible Core roadmaps. + +The roadmap will be updated by version. Based on team and community feedback an initial roadmap will be published for a major or minor version (2.0, 2.1) Subminor versions will generally not have roadmaps published. + +This is the first time we've published this and asked for feedback in exactly this manner. So feedback on the roadmap and the new process is quite welcome. We're aiming for further transparency and better inclusion of both community desires and submissions. + +These are our *best guess* roadmaps based on our own experience and based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things we missed, so each roadmap is both published as an idea of what's upcoming in Ansible, but also as a medium for seeking further feedback from the community. Here are the good places for you to submit feedback: + + * Our google-group ansible-devel + * Ansible Fest conferences. + * IRC: Our freenode channel #ansible (this one may have things lost in lots of conversation, so a caution). + +2.1 Roadmap +========== +## Windows, General +* Figuring out privilege escalation (runas w/ username/password) +* Implement kerberos encryption over http +* pywinrm conversion to requests (Some mess here on pywinrm/requests. will need docs etc.) +* NTLM support + +## Modules +* Windows + * Finish cleaning up tests and support for post-beta release. + * Strict mode cleanup (one module in core) + * Domain user/group management + * finish win\_host and win\_rm in the domain/workgroup modules. + * Close 2 existing PRs (These were deemed insufficient) + * Replicate python module API in PS/C# (deprecate hodgepodge of stuff from module_utils/powershell.ps1) +* Network + * Cisco modules (ios, iosxr, nxos, iosxe) + * Arista modules (eos) + * Juniper modules (junos) + * OpenSwitch + * Cumulus + * Dell (os10) - At risk + * Netconf shared module + * Hooks for supporting Tower credentials +* VMware (This one is a little at risk due to staffing. We're investigating some community maintainers and shifting some people at Ansible around, but it is a VERY high priority). + * vsphere\_guest brought to parity with other vmware modules (vs Viasat and 'whereismyjetpack' provided modules) + * VMware modules moved to official pyvmomi bindings + * VMware inventory script updates for pyvmomi, adding tagging support +* Azure (Notes: This is on hold until microsoft releases a working code generator. We have basic modules working against all of these resources. Could ship it against current SDK, but may break. Or should we pin the version?) 
+ * Minimal Azure coverage using new ARM api + * Resource Group + * Virtual Network + * Subnet + * Public IP + * Network Interface + * Storage Account + * Security Group + * Virtual Machine + * Update of inventory script to use new API, adding tagging support +* Docker: + * Start Docker module refactor + * Update to match current docker CLI capabilities + * Docker exec support +* Upgrade other cloud modules or work with community maintainers to upgrade. (In order) + * AWS (Community maintainers) + * Openstack (Community maintainers) + * Google (Google/Community) + * Digital Ocean (Community) +* Ziploader: + * Write code to create the zipfile that gets passed across the wire to be run on the remote python. + * Port most of the functionality in module_utils to be usage in ziploader instead. + * Port a few essential modules to use ziploader instead of module-replacer as proof of concept. + * New modules will be able to use ziploader. Old modules will need to be ported in future releases (Some modules will not need porting but others will) + * Better testing of modules, caching of modules clientside(Have not yet arrived at an architecture for this that we like. Low priority), better code sharing between ansible/ansible and modules + * ziploader is a helpful building block for: python3 porting(high priority), better code sharing between modules(medium priority) + * ziploader is a good idea before: enabling users to have custom module_utils directories +* Expand module diff support (already in progress in devel) + * Framework done. Need to add to modules, test etc. + * Coordinate with community to update their modules. +* Things we are kicking down the road that we said we’d do + * NOT remerging core with ansible/ansible this release cycle. +* Community stuff + * Define the process/ETA for reviewing PR’s from community. + * Publish better docs and how-tos for submitting code/freatures/fixes. + + + + + + + + + + + + + + + + + + + From bab251233ab551f267f57245127c73322b0e47d7 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Thu, 11 Feb 2016 15:57:05 -0500 Subject: [PATCH 0619/1113] add target date --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index 51a16bd7b7d..7efd97ce0be 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -12,7 +12,7 @@ These are our *best guess* roadmaps based on our own experience and based on req * Ansible Fest conferences. * IRC: Our freenode channel #ansible (this one may have things lost in lots of conversation, so a caution). -2.1 Roadmap +2.1 Roadmap, Targeted to End of April ========== ## Windows, General * Figuring out privilege escalation (runas w/ username/password) From d24761af1b8d673d11ed8b154d81a875cd9b4718 Mon Sep 17 00:00:00 2001 From: Anton Belonovich <tralick@gmail.com> Date: Fri, 12 Feb 2016 00:01:36 +0300 Subject: [PATCH 0620/1113] Update playbooks_roles.rst --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 73c9710f519..980dce74158 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -227,7 +227,7 @@ While it's probably not something you should do often, you can also conditionall This works by applying the conditional to every task in the role. Conditionals are covered later on in the documentation. -Finally, you may wish to assign tags to the roles you specify. 
You can do so inline::: +Finally, you may wish to assign tags to the roles you specify. You can do so inline:: --- From 37879ea671a50072683788d2265123d7d594adeb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 11 Feb 2016 13:12:41 -0800 Subject: [PATCH 0621/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9fc4ed04581..8d126bd8774 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9fc4ed0458136a3da55f672f8ddefd738ad941b8 +Subproject commit 8d126bd877444c9557b1671521516447cc557d3f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 6aeb2ab6cf5..f6c5ed987f7 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 6aeb2ab6cf54e0e3d309abedb2a9f164fb550914 +Subproject commit f6c5ed987f7f8ec20ad1d417b4a39ba6bbc5d7bc From 2d01d43e51b1cfb34883f8b302cd1e7048e062f4 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Thu, 11 Feb 2016 18:42:12 -0500 Subject: [PATCH 0622/1113] just some more grammer, punctuation cleanup. --- ROADMAP.md | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index 7efd97ce0be..069fac207d6 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -2,17 +2,17 @@ Ansible Roadmap ============= This document is now the location for published Ansible Core roadmaps. -The roadmap will be updated by version. Based on team and community feedback an initial roadmap will be published for a major or minor version (2.0, 2.1) Subminor versions will generally not have roadmaps published. +The roadmap will be updated by version. Based on team and community feedback, an initial roadmap will be published for a major or minor version (2.0, 2.1). Subminor versions will generally not have roadmaps published. -This is the first time we've published this and asked for feedback in exactly this manner. So feedback on the roadmap and the new process is quite welcome. We're aiming for further transparency and better inclusion of both community desires and submissions. +This is the first time we've published this and asked for feedback in this manner. So feedback on the roadmap and the new process is quite welcome. We are aiming for further transparency and better inclusion of both community desires and submissions. -These are our *best guess* roadmaps based on our own experience and based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things we missed, so each roadmap is both published as an idea of what's upcoming in Ansible, but also as a medium for seeking further feedback from the community. Here are the good places for you to submit feedback: +These are our *best guess* roadmaps based on our own experience and based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things we missed, so each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. 
Here are the good places for you to submit feedback: - * Our google-group ansible-devel + * Our google-group: ansible-devel * Ansible Fest conferences. * IRC: Our freenode channel #ansible (this one may have things lost in lots of conversation, so a caution). -2.1 Roadmap, Targeted to End of April +2.1 Roadmap, Targeted for the End of April ========== ## Windows, General * Figuring out privilege escalation (runas w/ username/password) @@ -22,11 +22,11 @@ These are our *best guess* roadmaps based on our own experience and based on req ## Modules * Windows - * Finish cleaning up tests and support for post-beta release. + * Finish cleaning up tests and support for post-beta release * Strict mode cleanup (one module in core) * Domain user/group management - * finish win\_host and win\_rm in the domain/workgroup modules. - * Close 2 existing PRs (These were deemed insufficient) + * Finish win\_host and win\_rm in the domain/workgroup modules. + * Close 2 existing PRs (These were deemed insufficient) * Replicate python module API in PS/C# (deprecate hodgepodge of stuff from module_utils/powershell.ps1) * Network * Cisco modules (ios, iosxr, nxos, iosxe) @@ -62,21 +62,21 @@ These are our *best guess* roadmaps based on our own experience and based on req * Google (Google/Community) * Digital Ocean (Community) * Ziploader: - * Write code to create the zipfile that gets passed across the wire to be run on the remote python. - * Port most of the functionality in module_utils to be usage in ziploader instead. - * Port a few essential modules to use ziploader instead of module-replacer as proof of concept. + * Write code to create the zipfile that gets passed across the wire to be run on the remote python + * Port most of the functionality in module\_utils to be usage in ziploader instead + * Port a few essential modules to use ziploader instead of module-replacer as proof of concept * New modules will be able to use ziploader. Old modules will need to be ported in future releases (Some modules will not need porting but others will) - * Better testing of modules, caching of modules clientside(Have not yet arrived at an architecture for this that we like. Low priority), better code sharing between ansible/ansible and modules + * Better testing of modules, caching of modules clientside(Have not yet arrived at an architecture for this that we like), better code sharing between ansible/ansible and modules * ziploader is a helpful building block for: python3 porting(high priority), better code sharing between modules(medium priority) * ziploader is a good idea before: enabling users to have custom module_utils directories * Expand module diff support (already in progress in devel) * Framework done. Need to add to modules, test etc. - * Coordinate with community to update their modules. + * Coordinate with community to update their modules * Things we are kicking down the road that we said we’d do - * NOT remerging core with ansible/ansible this release cycle. + * NOT remerging core with ansible/ansible this release cycle * Community stuff - * Define the process/ETA for reviewing PR’s from community. - * Publish better docs and how-tos for submitting code/freatures/fixes. 
+ * Define the process/ETA for reviewing PR’s from community + * Publish better docs and how-tos for submitting code/freatures/fixes From 8e4ed34cbd72e78b67380b2f1e9d09610a73f9b9 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Thu, 11 Feb 2016 20:56:33 -0500 Subject: [PATCH 0623/1113] series of changes based on PR comments --- ROADMAP.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ROADMAP.md b/ROADMAP.md index 069fac207d6..d4982369d45 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,16 +1,16 @@ -Ansible Roadmap +Roadmap For Ansible by RedHat ============= This document is now the location for published Ansible Core roadmaps. The roadmap will be updated by version. Based on team and community feedback, an initial roadmap will be published for a major or minor version (2.0, 2.1). Subminor versions will generally not have roadmaps published. -This is the first time we've published this and asked for feedback in this manner. So feedback on the roadmap and the new process is quite welcome. We are aiming for further transparency and better inclusion of both community desires and submissions. +This is the first time Ansible has published this and asked for feedback in this manner. So feedback on the roadmap and the new process is quite welcome. The team is aiming for further transparency and better inclusion of both community desires and submissions. -These are our *best guess* roadmaps based on our own experience and based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things we missed, so each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. Here are the good places for you to submit feedback: +These roadmaps are the team's *best guess* roadmaps based on the Ansible team's experience and are also based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things that got missed, so each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. Here are the good places for you to submit feedback: - * Our google-group: ansible-devel + * Ansible's google-group: ansible-devel * Ansible Fest conferences. - * IRC: Our freenode channel #ansible (this one may have things lost in lots of conversation, so a caution). + * IRC freenode channel: #ansible-devel (this one may have things lost in lots of conversation, so a caution). 2.1 Roadmap, Targeted for the End of April ========== @@ -41,7 +41,7 @@ These are our *best guess* roadmaps based on our own experience and based on req * vsphere\_guest brought to parity with other vmware modules (vs Viasat and 'whereismyjetpack' provided modules) * VMware modules moved to official pyvmomi bindings * VMware inventory script updates for pyvmomi, adding tagging support -* Azure (Notes: This is on hold until microsoft releases a working code generator. We have basic modules working against all of these resources. Could ship it against current SDK, but may break. Or should we pin the version?) +* Azure (Notes: This is on hold until Microsoft swaps out the code generator on the Azure Python SDK, which may introduce breaking changes. We have basic modules working against all of these resources at this time. 
Could ship it against current SDK, but may break. Or should the version be pinned?) * Minimal Azure coverage using new ARM api * Resource Group * Virtual Network @@ -72,11 +72,11 @@ These are our *best guess* roadmaps based on our own experience and based on req * Expand module diff support (already in progress in devel) * Framework done. Need to add to modules, test etc. * Coordinate with community to update their modules -* Things we are kicking down the road that we said we’d do +* Things being kicking down the road that we said we’d do * NOT remerging core with ansible/ansible this release cycle * Community stuff * Define the process/ETA for reviewing PR’s from community - * Publish better docs and how-tos for submitting code/freatures/fixes + * Publish better docs and how-tos for submitting code/features/fixes From af68ae1e870970b38b1fb7085246a2d25d4972dd Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 11 Feb 2016 22:25:17 -0500 Subject: [PATCH 0624/1113] switched from threading to multiprocessing functions same, just keeping names from confusing people --- lib/ansible/plugins/cache/memcached.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py index a34855bafc4..01fbefb07be 100644 --- a/lib/ansible/plugins/cache/memcached.py +++ b/lib/ansible/plugins/cache/memcached.py @@ -21,7 +21,7 @@ import collections import os import sys import time -import threading +from multiprocessing import Lock from itertools import chain from ansible import constants as C @@ -53,7 +53,7 @@ class ProxyClientPool(object): self._num_connections = 0 self._available_connections = collections.deque(maxlen=self.max_connections) self._locked_connections = set() - self._lock = threading.Lock() + self._lock = Lock() def _check_safe(self): if self.pid != os.getpid(): From a327420b6716ad3c85c3f11024beb360101389d0 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 12 Feb 2016 00:49:25 -0500 Subject: [PATCH 0625/1113] properly combine vars if merge is set hash_behaviour = merge now also applies to include vars --- lib/ansible/vars/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 4135ff17687..9cb0108ed64 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -600,8 +600,10 @@ class VariableManager: ''' Sets a value in the vars_cache for a host. ''' - host_name = host.get_name() if host_name not in self._vars_cache: self._vars_cache[host_name] = dict() - self._vars_cache[host_name][varname] = value + if varname in self._vars_cache[host_name]: + self._vars_cache[host_name][varname] = combine_vars(self._vars_cache[host_name][varname], value) + else: + self._vars_cache[host_name][varname] = value From 279b14ee93b6552b8a53b7362212f434d3d6adc0 Mon Sep 17 00:00:00 2001 From: Marius Gedminas <marius@gedmin.as> Date: Fri, 12 Feb 2016 17:52:44 +0200 Subject: [PATCH 0626/1113] Some typos in CHANGELOG.md --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65cca29a23b..c379fb31ccc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,7 +43,7 @@ Ansible Changes By Release * New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. 
* local connections now work with 'su' as a privilege escalation method * Ansible 2.0 has deprecated the “ssh” from ansible_ssh_user, ansible_ssh_host, and ansible_ssh_port to become ansible_user, ansible_host, and ansible_port. -* New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a +* New ssh configuration variables (`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. `ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). * ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. @@ -364,7 +364,7 @@ allowed in future versions: * Consolidated code from modules using urllib2 to normalize features, TLS and SNI support. * synchronize module's dest_port parameter now takes precedence over the ansible_ssh_port inventory setting. * Play output is now dynamically sized to terminal with a minimum of 80 coluumns (old default). -* vars_prompt and pause are now skipped with a warning if the play is called non interactively (i.e. pull from cron). +* vars_prompt and pause are now skipped with a warning if the play is called noninteractively (i.e. pull from cron). * Support for OpenBSD's 'doas' privilege escalation method. * Most vault operations can now be done over multilple files. * ansible-vault encrypt/decrypt read from stdin if no other input file is given, and can write to a given ``--output file`` (including stdout, '-'). @@ -375,15 +375,15 @@ allowed in future versions: * Many fixes and new options added to modules, too many to list here. * Now you can see task file and line number when using verbosity of 3 or above. * The ``[x-y]`` host range syntax is no longer supported. Note that ``[0:1]`` matches two hosts, i.e. the range is inclusive of its endpoints. -* We now recommend the Use `pattern1,pattern2` to combine host matching patterns. +* We now recommend the use of `pattern1,pattern2` to combine host matching patterns. * The use of ':' as a separator conflicts with IPv6 addresses and host ranges. It will be deprecated in the future. * The undocumented use of ';' as a separator is now deprecated. * modules and callbacks have been extended to support no_log to avoid data disclosure. -* new managed_syslog option has been added to control output to syslog on managed machines, no_log supercsedes this settings. +* new managed_syslog option has been added to control output to syslog on managed machines, no_log supersedes this settings. * Lookup, vars and action plugin pathing has been normalized, all now follow the same sequence to find relative files. * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. -* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_US.UTF8 which was the previous default. * add_hosts is much stricter about host name and will prevent invalid names from being added. 
* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * random cows are more random From edb36b6c4aced8231b80b3deccc2c83687082ecb Mon Sep 17 00:00:00 2001 From: Tom Paine <aioue@users.noreply.github.com> Date: Fri, 12 Feb 2016 17:13:39 +0000 Subject: [PATCH 0627/1113] Update profile_tasks.rst remove brackets on links --- lib/ansible/plugins/callback/profile_tasks.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.rst b/lib/ansible/plugins/callback/profile_tasks.rst index 04fe3099256..a125d64260d 100644 --- a/lib/ansible/plugins/callback/profile_tasks.rst +++ b/lib/ansible/plugins/callback/profile_tasks.rst @@ -5,8 +5,8 @@ Ansible plugin for timing individual tasks and overall execution time. Mashup of 2 excellent original works: -- (https://github.com/jlafon/ansible-profile) -- (https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old) +- https://github.com/jlafon/ansible-profile +- https://github.com/junaid18183/ansible_home/blob/master/ansible_plugins/callback_plugins/timestamp.py.old Usage ----- From 275728e0f6487153be939049b83a2d3dcd5fdff2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 10 Feb 2016 21:27:14 -0500 Subject: [PATCH 0628/1113] Fixing bugs related to nested blocks inside roles * Make sure dep chains are checked recursively for nested blocks * Fixing iterator is_failed() check to make sure we're not in a rescue block before returning True * Use is_failed() to test whether a host should be added to the TQM failed_hosts list * Use is_failed() when compiling the list of hosts left to iterate over in both the linear and free strategies Fixes #14222 --- lib/ansible/executor/play_iterator.py | 12 ++++--- lib/ansible/playbook/block.py | 43 +++++++++++++++--------- lib/ansible/playbook/role/__init__.py | 9 ++--- lib/ansible/plugins/strategy/__init__.py | 6 ++-- lib/ansible/plugins/strategy/free.py | 2 +- lib/ansible/plugins/strategy/linear.py | 2 +- 6 files changed, 46 insertions(+), 28 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 2341c679c29..bd36b5a4175 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -198,7 +198,7 @@ class PlayIterator: task = None if s.run_state == self.ITERATING_COMPLETE: display.debug("host %s is done iterating, returning" % host.name) - return (None, None) + return (s, None) old_s = s (s, task) = self._get_next_task_from_state(s, host=host, peek=peek) @@ -207,14 +207,14 @@ class PlayIterator: if ra != rb: return True else: - return old_s.cur_dep_chain != task._block._dep_chain + return old_s.cur_dep_chain != task._block.get_dep_chain() if task and task._role: # if we had a current role, mark that role as completed if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek: s.cur_role._completed[host.name] = True s.cur_role = task._role - s.cur_dep_chain = task._block._dep_chain + s.cur_dep_chain = task._block.get_dep_chain() if not peek: self._host_states[host.name] = s @@ -417,7 +417,11 @@ class PlayIterator: else: return True elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state): - return True + cur_block = self._blocks[state.cur_block] + if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0: + return False + else: + return 
True elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state): return True elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state): diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 095e6b338db..b16316ab2e6 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -46,6 +46,7 @@ class Block(Base, Become, Conditional, Taggable): self._role = role self._task_include = None self._parent_block = None + self._dep_chain = None self._use_handlers = use_handlers self._implicit = implicit @@ -54,11 +55,6 @@ class Block(Base, Become, Conditional, Taggable): elif parent_block: self._parent_block = parent_block - if parent_block: - self._dep_chain = parent_block._dep_chain[:] - else: - self._dep_chain = [] - super(Block, self).__init__() def get_vars(self): @@ -153,6 +149,15 @@ class Block(Base, Become, Conditional, Taggable): except AssertionError: raise AnsibleParserError("A malformed block was encountered.", obj=self._ds) + def get_dep_chain(self): + if self._dep_chain is None: + if self._parent_block: + return self._parent_block.get_dep_chain() + else: + return None + else: + return self._dep_chain[:] + def copy(self, exclude_parent=False, exclude_tasks=False): def _dupe_task_list(task_list, new_block): new_task_list = [] @@ -169,7 +174,9 @@ class Block(Base, Become, Conditional, Taggable): new_me = super(Block, self).copy() new_me._play = self._play new_me._use_handlers = self._use_handlers - new_me._dep_chain = self._dep_chain[:] + + if self._dep_chain: + new_me._dep_chain = self._dep_chain[:] if not exclude_tasks: new_me.block = _dupe_task_list(self.block or [], new_me) @@ -201,7 +208,7 @@ class Block(Base, Become, Conditional, Taggable): if attr not in ('block', 'rescue', 'always'): data[attr] = getattr(self, attr) - data['dep_chain'] = self._dep_chain + data['dep_chain'] = self.get_dep_chain() if self._role is not None: data['role'] = self._role.serialize() @@ -226,7 +233,7 @@ class Block(Base, Become, Conditional, Taggable): if attr in data and attr not in ('block', 'rescue', 'always'): setattr(self, attr, data.get(attr)) - self._dep_chain = data.get('dep_chain', []) + self._dep_chain = data.get('dep_chain', None) # if there was a serialized role, unpack it too role_data = data.get('role') @@ -247,10 +254,12 @@ class Block(Base, Become, Conditional, Taggable): pb = Block() pb.deserialize(pb_data) self._parent_block = pb + self._dep_chain = self._parent_block.get_dep_chain() def evaluate_conditional(self, templar, all_vars): - if len(self._dep_chain): - for dep in self._dep_chain: + dep_chain = self.get_dep_chain() + if dep_chain: + for dep in dep_chain: if not dep.evaluate_conditional(templar, all_vars): return False if self._task_include is not None: @@ -274,8 +283,10 @@ class Block(Base, Become, Conditional, Taggable): if self._task_include: self._task_include.set_loader(loader) - for dep in self._dep_chain: - dep.set_loader(loader) + dep_chain = self.get_dep_chain() + if dep_chain: + for dep in dep_chain: + dep.set_loader(loader) def _get_parent_attribute(self, attr, extend=False): ''' @@ -305,10 +316,10 @@ class Block(Base, Become, Conditional, Taggable): else: value = parent_value - if len(self._dep_chain) and (value is None or extend): - reverse_dep_chain = self._dep_chain[:] - reverse_dep_chain.reverse() - for dep in reverse_dep_chain: + dep_chain = self.get_dep_chain() + if dep_chain and (value is None or extend): + dep_chain.reverse() + for 
dep in dep_chain: dep_value = getattr(dep, attr, None) if extend: value = self._extend_value(value, dep_value) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index f192ea6c945..9b406ae7ba4 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -262,10 +262,11 @@ class Role(Base, Become, Conditional, Taggable): def get_inherited_vars(self, dep_chain=[], include_params=True): inherited_vars = dict() - for parent in dep_chain: - inherited_vars = combine_vars(inherited_vars, parent._role_vars) - if include_params: - inherited_vars = combine_vars(inherited_vars, parent._role_params) + if dep_chain: + for parent in dep_chain: + inherited_vars = combine_vars(inherited_vars, parent._role_vars) + if include_params: + inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars def get_role_params(self): diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 34db52a77dc..49c1bdfb350 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -210,8 +210,10 @@ class StrategyBase: [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts] else: iterator.mark_host_failed(host) - (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) - if not state or state.run_state != PlayIterator.ITERATING_RESCUE: + + # only add the host to the failed list officially if it has + # been failed by the iterator + if iterator.is_failed(host): self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 5431f96f46d..74ac9743b7f 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -58,7 +58,7 @@ class StrategyModule(StrategyBase): work_to_do = True while work_to_do and not self._tqm._terminated: - hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] + hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts and not iterator.is_failed(host)] if len(hosts_left) == 0: self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 846c3d0cf3e..0d4faf61493 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -162,7 +162,7 @@ class StrategyModule(StrategyBase): try: display.debug("getting the remaining hosts for this loop") - hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts] + hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts and not iterator.is_failed(host)] display.debug("done getting the remaining hosts for this loop") # queue up this task for each host in the inventory From 22aaff5af76edb82eb8cf16db91926664994988d Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 12 Feb 2016 17:06:20 -0500 Subject: [PATCH 0629/1113] adds new action plugin iosxr_template This adds a new action plugin iosxr_template that allows the iosxr_template module to pass network device configurations 
through the template engine. It also allows configurations to be backed up. --- lib/ansible/plugins/action/iosxr_template.py | 28 ++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 lib/ansible/plugins/action/iosxr_template.py diff --git a/lib/ansible/plugins/action/iosxr_template.py b/lib/ansible/plugins/action/iosxr_template.py new file mode 100644 index 00000000000..5334b644d32 --- /dev/null +++ b/lib/ansible/plugins/action/iosxr_template.py @@ -0,0 +1,28 @@ +# +# Copyright 2015 Peter Sprygada <psprygada@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.action import ActionBase +from ansible.plugins.action.net_template import ActionModule as NetActionModule + +class ActionModule(NetActionModule, ActionBase): + pass + + From 06b072c1f7daca45fb205c6085f5662cf7f33971 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 13 Feb 2016 01:02:47 -0500 Subject: [PATCH 0630/1113] Fix bugs related to task_includes and dep chain inheritance * Fix the way task_include fields were created and copied * Have blocks get_dep_chain() look at task_include's blocks for proper dep chain inheritance * Fix the way task_include fields are copied to prevent a recursive degradation Fixes #14460 --- lib/ansible/playbook/block.py | 7 +++++-- lib/ansible/plugins/strategy/__init__.py | 8 ++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index b16316ab2e6..1c9569de8da 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -153,6 +153,8 @@ class Block(Base, Become, Conditional, Taggable): if self._dep_chain is None: if self._parent_block: return self._parent_block.get_dep_chain() + elif self._task_include: + return self._task_include._block.get_dep_chain() else: return None else: @@ -193,7 +195,8 @@ class Block(Base, Become, Conditional, Taggable): new_me._task_include = None if self._task_include: - new_me._task_include = self._task_include.copy() + new_me._task_include = self._task_include.copy(exclude_block=True) + new_me._task_include._block = self._task_include._block.copy(exclude_tasks=True) return new_me @@ -374,7 +377,7 @@ class Block(Base, Become, Conditional, Taggable): return tmp_list def evaluate_block(block): - new_block = self.copy() + new_block = self.copy(exclude_tasks=True) new_block.block = evaluate_and_append_task(block.block) new_block.rescue = evaluate_and_append_task(block.rescue) new_block.always = evaluate_and_append_task(block.always) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 49c1bdfb350..29d67808765 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -452,7 +452,7 @@ class StrategyBase: block_list = 
load_list_of_blocks( data, play=included_file._task._block._play, - parent_block=included_file._task._block, + parent_block=None, task_include=included_file._task, role=included_file._task._role, use_handlers=is_handler, @@ -478,11 +478,7 @@ class StrategyBase: # set the vars for this task from those specified as params to the include for b in block_list: # first make a copy of the including task, so that each has a unique copy to modify - # FIXME: not sure if this is the best way to fix this, as we might be losing - # information in the copy. Previously we assigned the include params to - # the block variables directly, which caused other problems, so we may - # need to figure out a third option if this also presents problems. - b._task_include = b._task_include.copy(exclude_block=True) + b._task_include = b._task_include.copy() # then we create a temporary set of vars to ensure the variable reference is unique temp_vars = b._task_include.vars.copy() temp_vars.update(included_file._args.copy()) From 0bb49090cf08ee20883a82614eb4dd14fa1f480e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 13 Feb 2016 20:12:37 -0500 Subject: [PATCH 0631/1113] make some options not mandatory to pass this should simplify api calls --- lib/ansible/playbook/play_context.py | 34 +++++++++------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index f7b38e9e79d..d465789d78f 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -251,35 +251,23 @@ class PlayContext(Base): options specified by the user on the command line. These have a lower precedence than those set on the play or host. ''' - - if options.connection: - self.connection = options.connection - - self.remote_user = options.remote_user - self.private_key_file = options.private_key_file - self.ssh_common_args = options.ssh_common_args - self.sftp_extra_args = options.sftp_extra_args - self.scp_extra_args = options.scp_extra_args - self.ssh_extra_args = options.ssh_extra_args - # privilege escalation self.become = options.become self.become_method = options.become_method self.become_user = options.become_user + self.check_mode = boolean(options.check) + + # get ssh options FIXME: make these common to all connections + for flag in ['ssh_common_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args']: + setattr(self, flag, getattr(options,flag, '')) + # general flags (should we move out?) 
- if options.verbosity: - self.verbosity = options.verbosity - if options.check: - self.check_mode = boolean(options.check) - if hasattr(options, 'force_handlers') and options.force_handlers: - self.force_handlers = boolean(options.force_handlers) - if hasattr(options, 'step') and options.step: - self.step = boolean(options.step) - if hasattr(options, 'start_at_task') and options.start_at_task: - self.start_at_task = to_unicode(options.start_at_task) - if hasattr(options, 'diff') and options.diff: - self.diff = boolean(options.diff) + for flag in ['connection','remote_user', 'private_key_file', 'verbosity', 'force_handlers', 'step', 'start_at_task', 'diff']: + attribute = getattr(options, flag, False) + if attribute: + setattr(self, flag, attribute) + if hasattr(options, 'timeout') and options.timeout: self.timeout = int(options.timeout) From 4b953c4b161c5e9fdaed60596ac9dc48bd7e6d91 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 13 Feb 2016 19:40:05 -0500 Subject: [PATCH 0632/1113] extended api example with more options in tasks also adopted to less requried options --- docsite/rst/developing_api.rst | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index d911d4b4e84..c6f66709c9d 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -46,11 +46,11 @@ In 2.0 things get a bit more complicated to start, but you end up with much more from ansible.playbook.play import Play from ansible.executor.task_queue_manager import TaskQueueManager - Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) + Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check']) # initialize needed objects variable_manager = VariableManager() loader = DataLoader() - options = Options(connection='local', module_path='/path/to/mymodules', forks=100, remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False) + options = Options(connection='local', module_path='/path/to/mymodules', forks=100, become=None, become_method=None, become_user=None, check=False) passwords = dict(vault_pass='secret') # create inventory and pass to var manager @@ -62,7 +62,10 @@ In 2.0 things get a bit more complicated to start, but you end up with much more name = "Ansible Play", hosts = 'localhost', gather_facts = 'no', - tasks = [ dict(action=dict(module='debug', args=dict(msg='Hello Galaxy!'))) ] + tasks = [ + dict(action=dict(module='shell', args='ls'), register='shell_out'), + dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}'))) + ] ) play = Play().load(play_source, variable_manager=variable_manager, loader=loader) From 369cb8fa9f64578ce2c8e0252921caa42020487a Mon Sep 17 00:00:00 2001 From: William Durand <will+git@drnd.me> Date: Sun, 14 Feb 2016 17:04:26 +0100 Subject: [PATCH 0633/1113] [contrib] Add option to define group vars in DigitalOcean dynamic inventory script --- contrib/inventory/digital_ocean.ini | 6 ++++++ contrib/inventory/digital_ocean.py | 26 +++++++++++++++++--------- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/contrib/inventory/digital_ocean.ini 
b/contrib/inventory/digital_ocean.ini index 01afe33968d..b809554b20f 100644 --- a/contrib/inventory/digital_ocean.ini +++ b/contrib/inventory/digital_ocean.ini @@ -26,3 +26,9 @@ cache_max_age = 300 # Use the private network IP address instead of the public when available. # use_private_network = False + +# Pass variables to every group, e.g.: +# +# group_variables = { 'ansible_user': 'root' } +# +group_variables = {} diff --git a/contrib/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py index 1c0ef68cff4..8eeeba8de67 100755 --- a/contrib/inventory/digital_ocean.py +++ b/contrib/inventory/digital_ocean.py @@ -137,6 +137,7 @@ import re import argparse from time import time import ConfigParser +import ast try: import json @@ -168,6 +169,7 @@ class DigitalOceanInventory(object): self.cache_path = '.' self.cache_max_age = 0 self.use_private_network = False + self.group_variables = {} # Read settings, environment variables, and CLI arguments self.read_settings() @@ -261,6 +263,10 @@ or environment variables (DO_API_TOKEN)''') if config.has_option('digital_ocean', 'use_private_network'): self.use_private_network = config.get('digital_ocean', 'use_private_network') + # Group variables + if config.has_option('digital_ocean', 'group_variables'): + self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) + def read_environment(self): ''' Reads the settings from environment variables ''' # Setup credentials @@ -359,22 +365,24 @@ or environment variables (DO_API_TOKEN)''') else: dest = droplet['ip_address'] - self.inventory[droplet['id']] = [dest] - self.push(self.inventory, droplet['name'], dest) - self.push(self.inventory, 'region_' + droplet['region']['slug'], dest) - self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest) - self.push(self.inventory, 'size_' + droplet['size']['slug'], dest) + dest = { 'hosts': [ dest ], 'vars': self.group_variables } + + self.inventory[droplet['id']] = dest + self.inventory[droplet['name']] = dest + self.inventory['region_' + droplet['region']['slug']] = dest + self.inventory['image_' + str(droplet['image']['id'])] = dest + self.inventory['size_' + droplet['size']['slug']] = dest image_slug = droplet['image']['slug'] if image_slug: - self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest) + self.inventory['image_' + self.to_safe(image_slug)] = dest else: image_name = droplet['image']['name'] if image_name: - self.push(self.inventory, 'image_' + self.to_safe(image_name), dest) + self.inventory['image_' + self.to_safe(image_name)] = dest - self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest) - self.push(self.inventory, 'status_' + droplet['status'], dest) + self.inventory['distro_' + self.to_safe(droplet['image']['distribution'])] = dest + self.inventory['status_' + droplet['status']] = dest def load_droplet_variables_for_host(self): From aea8900abe7aad4cbda1da7624417137acd21f3d Mon Sep 17 00:00:00 2001 From: Aleksi Aalto <aga@iki.fi> Date: Mon, 15 Feb 2016 01:54:52 +0200 Subject: [PATCH 0634/1113] document exit status for ansible-playbook --- docs/man/man1/ansible-playbook.1.asciidoc.in | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 82181982fb7..289e7917ddd 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -204,6 +204,24 @@ up to three times for more output. 
Show program's version number and exit. +EXIT STATUS +----------- + +*0* -- OK or no hosts matched + +*1* -- Error + +*2* -- One or more hosts failed + +*3* -- One or more hosts were unreachable + +*4* -- Parser error + +*5* -- Bad or incomplete options + +*99* -- User interrupted execution + +*250* -- Unexpected error ENVIRONMENT ----------- From 5d49f4e629f510ff8e5a169c5c0189eb603fca34 Mon Sep 17 00:00:00 2001 From: Pascal Grange <pascal1.grange@orange.com> Date: Mon, 15 Feb 2016 10:18:44 +0100 Subject: [PATCH 0635/1113] Fix related to #13981 When working around "bad systems that insist on not allowing updates in an atomic manner", we should not run previous exception management code that tries to perform atomic move in case of exception since the dirty non atomic move has already been performed. --- lib/ansible/module_utils/basic.py | 62 +++++++++++++++---------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 3c14af7fbbb..8d87511ae49 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1758,39 +1758,39 @@ class AnsibleModule(object): # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) - - dest_dir = os.path.dirname(dest) - dest_file = os.path.basename(dest) - try: - tmp_dest = tempfile.NamedTemporaryFile( - prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file) - except (OSError, IOError): - e = get_exception() - self.fail_json(msg='The destination directory (%s) is not writable by the current user.' % dest_dir) - - try: # leaves tmp file behind when sudo and not root - if switched_user and os.getuid() != 0: - # cleanup will happen by 'rm' of tempdir - # copy2 will preserve some metadata - shutil.copy2(src, tmp_dest.name) - else: - shutil.move(src, tmp_dest.name) - if self.selinux_enabled(): - self.set_context_if_different( - tmp_dest.name, context, False) + else: + dest_dir = os.path.dirname(dest) + dest_file = os.path.basename(dest) try: - tmp_stat = os.stat(tmp_dest.name) - if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): - os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid) - except OSError: + tmp_dest = tempfile.NamedTemporaryFile( + prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file) + except (OSError, IOError): e = get_exception() - if e.errno != errno.EPERM: - raise - os.rename(tmp_dest.name, dest) - except (shutil.Error, OSError, IOError): - e = get_exception() - self.cleanup(tmp_dest.name) - self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) + self.fail_json(msg='The destination directory (%s) is not writable by the current user.' 
% dest_dir) + + try: # leaves tmp file behind when sudo and not root + if switched_user and os.getuid() != 0: + # cleanup will happen by 'rm' of tempdir + # copy2 will preserve some metadata + shutil.copy2(src, tmp_dest.name) + else: + shutil.move(src, tmp_dest.name) + if self.selinux_enabled(): + self.set_context_if_different( + tmp_dest.name, context, False) + try: + tmp_stat = os.stat(tmp_dest.name) + if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid): + os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid) + except OSError: + e = get_exception() + if e.errno != errno.EPERM: + raise + os.rename(tmp_dest.name, dest) + except (shutil.Error, OSError, IOError): + e = get_exception() + self.cleanup(tmp_dest.name) + self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) if creating: # make sure the file has the correct permissions From 889c273ac41914fabb65e7e379a92054cfddb258 Mon Sep 17 00:00:00 2001 From: Illia Krauchanka <fomistoklus+github@gmail.com> Date: Mon, 15 Feb 2016 15:53:34 +0300 Subject: [PATCH 0636/1113] Update playbooks_lookups.rst --- docsite/rst/playbooks_lookups.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 3c2222c337b..454036b41f7 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -25,6 +25,7 @@ The file lookup is the most basic lookup type. Contents can be read off the filesystem as follows:: + --- - hosts: all vars: contents: "{{ lookup('file', '/etc/foo.txt') }}" From 0c1395ed2fe9fff55f1da112f183f38fc117a0cb Mon Sep 17 00:00:00 2001 From: Kamal Wood <kamalwood@hotmail.com> Date: Mon, 15 Feb 2016 14:06:21 +0000 Subject: [PATCH 0637/1113] Documenting that values passed in using the extra-vars 'key=value' syntax are interpreted as strings. --- docsite/rst/playbooks_variables.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 122c0ef9232..47277b0bce9 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -730,6 +730,9 @@ As of Ansible 1.2, you can also pass in extra vars as quoted JSON, like so:: The ``key=value`` form is obviously simpler, but it's there if you need it! +.. note:: Values passed in using the ``key=value`` syntax are interpreted as strings. + Use the JSON format if you need to pass in anything that shouldn't be a string (Booleans, integers, floats, lists etc). 
+ As of Ansible 1.3, extra vars can be loaded from a JSON file with the ``@`` syntax:: --extra-vars "@some_file.json" From bc41f46fece6838ee240739e15affe9d7ddbdf14 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 15 Feb 2016 12:08:07 -0500 Subject: [PATCH 0638/1113] pushed non-atomic to option of last resort try to copy into place first --- lib/ansible/module_utils/basic.py | 41 +++++++++++++++++-------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8d87511ae49..df591444d99 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1737,24 +1737,7 @@ class AnsibleModule(object): os.rename(src, dest) except (IOError, OSError): e = get_exception() - # sadly there are some situations where we cannot ensure atomicity, but only if - # the user insists and we get the appropriate error we update the file unsafely - if unsafe_writes and e.errno == errno.EBUSY: - #TODO: issue warning that this is an unsafe operation, but doing it cause user insists - try: - try: - out_dest = open(dest, 'wb') - in_src = open(src, 'rb') - shutil.copyfileobj(in_src, out_dest) - finally: # assuring closed files in 2.4 compatible way - if out_dest: - out_dest.close() - if in_src: - in_src.close() - except (shutil.Error, OSError, IOError): - e = get_exception() - self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e)) - elif e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: + if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) @@ -1789,8 +1772,28 @@ class AnsibleModule(object): os.rename(tmp_dest.name, dest) except (shutil.Error, OSError, IOError): e = get_exception() + # sadly there are some situations where we cannot ensure atomicity, but only if + # the user insists and we get the appropriate error we update the file unsafely + if unsafe_writes and e.errno == errno.EBUSY: + #TODO: issue warning that this is an unsafe operation, but doing it cause user insists + try: + try: + out_dest = open(dest, 'wb') + in_src = open(src, 'rb') + shutil.copyfileobj(in_src, out_dest) + finally: # assuring closed files in 2.4 compatible way + if out_dest: + out_dest.close() + if in_src: + in_src.close() + except (shutil.Error, OSError, IOError): + e = get_exception() + self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e)) + + else: + self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) + self.cleanup(tmp_dest.name) - self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) if creating: # make sure the file has the correct permissions From 6dc910c13a944c45303f6993db7056547208135c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 9 Feb 2016 19:05:41 -0500 Subject: [PATCH 0639/1113] shell + become fixes 1 less level of shell + quoting simplified become commands, less quote and shell --- lib/ansible/playbook/play_context.py | 8 ++++---- lib/ansible/plugins/action/__init__.py | 6 +++--- test/units/playbook/test_play_context.py | 12 ++++++------ test/units/plugins/action/test_action.py | 4 ++-- 4 files changed, 15 insertions(+), 15 
deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index d465789d78f..a7c333a5520 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -464,8 +464,7 @@ class PlayContext(Base): return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) prompt = detect_su_prompt - su_success_cmd = '%s -c %s' % (executable, success_cmd) # this is here cause su too succeptible to overquoting - becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, su_success_cmd) #works with sh + becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, pipes.quote('%s -c %s' % (executable, success_cmd))) elif self.become_method == 'pbrun': @@ -479,7 +478,7 @@ class PlayContext(Base): elif self.become_method == 'runas': raise AnsibleError("'runas' is not yet implemented") - #TODO: figure out prompt + #FIXME: figure out prompt # this is not for use with winrm plugin but if they ever get ssh native on windoez becomecmd = '%s %s /user:%s "%s"' % (exe, flags, self.become_user, success_cmd) @@ -494,6 +493,7 @@ class PlayContext(Base): if self.become_user: flags += ' -u %s ' % self.become_user + #FIXME: make shell independant becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd) else: @@ -502,7 +502,7 @@ class PlayContext(Base): if self.become_pass: self.prompt = prompt self.success_key = success_key - return ('%s -c %s' % (executable, pipes.quote(becomecmd))) + return becomecmd return cmd diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 9a149895a36..dfa44645927 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -507,9 +507,6 @@ class ActionBase(with_metaclass(ABCMeta, object)): replacement strategy (python3 could use surrogateescape) ''' - if executable is not None and self._connection.allow_executable: - cmd = executable + ' -c ' + pipes.quote(cmd) - display.debug("_low_level_execute_command(): starting") if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) @@ -522,6 +519,9 @@ class ActionBase(with_metaclass(ABCMeta, object)): display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) + if executable is not None and self._connection.allow_executable: + cmd = executable + ' -c ' + pipes.quote(cmd) + display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py index cc9441dab87..2e2c2238cc2 100644 --- a/test/units/playbook/test_play_context.py +++ b/test/units/playbook/test_play_context.py @@ -140,28 +140,28 @@ class TestPlayContext(unittest.TestCase): play_context.become_method = 'sudo' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_flags, play_context.become_user, default_exe, play_context.success_key, default_cmd)) + self.assertEqual(cmd, """%s %s -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user, default_exe, play_context.success_key, default_cmd)) play_context.become_pass = 'testpass' cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe) - self.assertEqual(cmd, 
"""%s -c '%s %s -p "%s" -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_flags.replace('-n',''), play_context.prompt, play_context.become_user, default_exe, play_context.success_key, default_cmd)) + self.assertEqual(cmd, """%s %s -p "%s" -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n',''), play_context.prompt, play_context.become_user, default_exe, play_context.success_key, default_cmd)) play_context.become_pass = None play_context.become_method = 'su' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s -c %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, su_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd)) + self.assertEqual(cmd, """%s %s -c '%s -c '"'"'echo %s; %s'"'"''""" % (su_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd)) play_context.become_method = 'pbrun' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s -b %s -u %s '"'"'echo %s; %s'"'"''""" % (default_exe, pbrun_exe, pbrun_flags, play_context.become_user, play_context.success_key, default_cmd)) + self.assertEqual(cmd, """%s -b %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, play_context.become_user, play_context.success_key, default_cmd)) play_context.become_method = 'pfexec' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s "'"'"'echo %s; %s'"'"'"'""" % (default_exe, pfexec_exe, pfexec_flags, play_context.success_key, default_cmd)) + self.assertEqual(cmd, '''%s %s "'echo %s; %s'"''' % (pfexec_exe, pfexec_flags, play_context.success_key, default_cmd)) play_context.become_method = 'doas' cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s echo %s && %s %s env ANSIBLE=true %s'""" % (default_exe, doas_exe, doas_flags, play_context.success_key, doas_exe, doas_flags, default_cmd)) + self.assertEqual(cmd, """%s %s echo %s && %s %s env ANSIBLE=true %s""" % (doas_exe, doas_flags, play_context.success_key, doas_exe, doas_flags, default_cmd)) play_context.become_method = 'bad' self.assertRaises(AnsibleError, play_context.make_become_cmd, cmd=default_cmd, executable="/bin/bash") diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index afb5d767e10..401d1363e3e 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -49,7 +49,7 @@ class TestActionBase(unittest.TestCase): play_context.remote_user = 'apo' action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with("/bin/sh -c ECHO", executable='/bin/sh') + play_context.make_become_cmd.assert_called_once_with("ECHO", executable='/bin/sh') play_context.make_become_cmd.reset_mock() @@ -58,6 +58,6 @@ class TestActionBase(unittest.TestCase): try: play_context.remote_user = 'root' action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with("/bin/sh -c 'ECHO SAME'", executable='/bin/sh') + play_context.make_become_cmd.assert_called_once_with("ECHO SAME", executable='/bin/sh') finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 6ddea3e915012a381b53e34b19c2270cf5e4e815 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 15 Feb 2016 14:13:20 -0500 Subject: [PATCH 0640/1113] removed follow from 
common file docs --- lib/ansible/utils/module_docs_fragments/files.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py index 9bc96c6e257..4a79e394fda 100644 --- a/lib/ansible/utils/module_docs_fragments/files.py +++ b/lib/ansible/utils/module_docs_fragments/files.py @@ -60,12 +60,4 @@ options: - level part of the SELinux file context. This is the MLS/MCS attribute, sometimes known as the C(range). C(_default) feature works as for I(seuser). - follow: - required: false - default: "no" - choices: [ "yes", "no" ] - version_added: "1.8" - description: - - 'This flag indicates that filesystem links, if they exist, should be followed.' - """ From 8edcca0ef5a0dea6df6c65a9d8c407965c7a4f74 Mon Sep 17 00:00:00 2001 From: George Christou <gechrr@gmail.com> Date: Mon, 15 Feb 2016 22:18:59 +0000 Subject: [PATCH 0641/1113] Add simple --diff colour support --- lib/ansible/plugins/callback/__init__.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 1fa6c03753b..ce028dc2793 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -28,6 +28,7 @@ from ansible.compat.six import string_types from ansible import constants as C from ansible.vars import strip_internal_keys +from ansible.utils.color import stringc from ansible.utils.unicode import to_unicode try: @@ -134,9 +135,17 @@ class CallbackBase: fromfiledate='', tofiledate='', n=10) - difflines = list(differ) - if difflines: - ret.extend(difflines) + has_diff = False + for line in differ: + has_diff = True + if line.startswith('-'): + line = stringc(line, 'red') + elif line.startswith('+'): + line = stringc(line, 'green') + elif line.startswith('@@'): + line = stringc(line, 'cyan') + ret.append(line) + if has_diff: ret.append('\n') if 'prepared' in diff: ret.append(to_unicode(diff['prepared'])) From c3b30d251f76bba38d1f68688173e49685ac40c0 Mon Sep 17 00:00:00 2001 From: Yair Fried <yfried@redhat.com> Date: Tue, 16 Feb 2016 13:19:18 +0200 Subject: [PATCH 0642/1113] Fix openstack auth type Otherwise auth type defaults to 'str' and shade fails --- lib/ansible/module_utils/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 934d51a271c..6dbf04a68a7 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -74,7 +74,7 @@ def openstack_full_argument_spec(**kwargs): spec = dict( cloud=dict(default=None), auth_type=dict(default=None), - auth=dict(default=None, no_log=True), + auth=dict(default=None, type='dict', no_log=True), region_name=dict(default=None), availability_zone=dict(default=None), verify=dict(default=True, aliases=['validate_certs']), From b9d0662faf2910dea503c0c8c35ad9951a4c6e9c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 15 Feb 2016 17:11:49 -0500 Subject: [PATCH 0643/1113] use stat module instead of checksum code - added new function for action plugins this avoids the very fragile checksum code that is shell dependant. 
- ported copy module to it - converted assemble to new stat function - some corrections and ported temlpate - updated old checksum function to use new stat one under the hood - documented revamped remote checksum method --- lib/ansible/plugins/action/__init__.py | 60 ++++++++++++++++++-------- lib/ansible/plugins/action/assemble.py | 5 ++- lib/ansible/plugins/action/copy.py | 17 ++++---- lib/ansible/plugins/action/template.py | 19 +++----- 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 9a149895a36..4ed496fbe98 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -291,28 +291,54 @@ class ActionBase(with_metaclass(ABCMeta, object)): res = self._low_level_execute_command(cmd, sudoable=sudoable) return res + def _execute_remote_stat(self, path, all_vars, follow): + ''' + Get information from remote file. + ''' + module_args=dict( + path=path, + follow=follow, + get_md5=False, + get_checksum=True, + checksum_algo='sha1', + ) + mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars) + + if 'failed' in mystat and mystat['failed']: + raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg'])) + + if not mystat['stat']['exists']: + # empty might be matched, 1 should never match, also backwards compatible + mystat['stat']['checksum'] = '1' + + return mystat['stat'] + def _remote_checksum(self, path, all_vars): ''' - Takes a remote checksum and returns 1 if no file + Produces a remote checksum given a path, + Returns a number 0-4 for specific errors instead of checksum, also ensures it is different + 0 = unknown error + 1 = file does not exist, this might not be an error + 2 = permissions issue + 3 = its a directory, not a file + 4 = stat module failed, likely due to not finding python ''' - - python_interp = all_vars.get('ansible_python_interpreter', 'python') - - cmd = self._connection._shell.checksum(path, python_interp) - data = self._low_level_execute_command(cmd, sudoable=True) + x = "0" # unknown error has occured try: - data2 = data['stdout'].strip().splitlines()[-1] - if data2 == u'': - # this may happen if the connection to the remote server - # failed, so just return "INVALIDCHECKSUM" to avoid errors - return "INVALIDCHECKSUM" + remote_stat = self._execute_remote_stat(path, all_vars, follow=False) + if remote_stat['exists'] and remote_stat['isdir']: + x = "3" # its a directory not a file else: - return data2.split()[0] - except IndexError: - display.warning(u"Calculating checksum failed unusually, please report this to " - u"the list so it can be fixed\ncommand: %s\n----\noutput: %s\n----\n" % (to_unicode(cmd), data)) - # this will signal that it changed and allow things to keep going - return "INVALIDCHECKSUM" + x = remote_stat['checksum'] # if 1, file is missing + except AnsibleError as e: + errormsg = to_bytes(e) + if errormsg.endswith('Permission denied'): + x = "2" # cannot read file + elif errormsg.endswith('MODULE FAILURE'): + x = "4" # python not found or module uncaught exception + finally: + return x + def _remote_expand_user(self, path): ''' takes a remote path and performs tilde expansion on the remote host ''' diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index aae105400fd..4bbecbd25a0 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -89,6 +89,7 @@ class 
ActionModule(ActionBase): delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) + follow = self._task.args.get('follow', False) ignore_hidden = self._task.args.get('ignore_hidden', False) if src is None or dest is None: @@ -119,10 +120,10 @@ class ActionModule(ActionBase): path_checksum = checksum_s(path) dest = self._remote_expand_user(dest) - remote_checksum = self._remote_checksum(dest, all_vars=task_vars) + dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) diff = {} - if path_checksum != remote_checksum: + if path_checksum != dest_stat['checksum']: resultant = file(path).read() if self._play_context.diff: diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 17c22865300..f9cd4c59030 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -46,6 +46,7 @@ class ActionModule(ActionBase): force = boolean(self._task.args.get('force', 'yes')) faf = self._task.first_available_file remote_src = boolean(self._task.args.get('remote_src', False)) + follow = boolean(self._task.args.get('follow', False)) if (source is None and content is None and faf is None) or dest is None: result['failed'] = True @@ -167,11 +168,11 @@ class ActionModule(ActionBase): else: dest_file = self._connection._shell.join_path(dest) - # Attempt to get the remote checksum - remote_checksum = self._remote_checksum(dest_file, all_vars=task_vars) + # Attempt to get remote file info + dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow) - if remote_checksum == '3': - # The remote_checksum was executed on a directory. + if dest_status['exists'] and dest_status['isdir']: + # The dest is a directory. if content is not None: # If source was defined as content remove the temporary file and fail out. self._remove_tempfile_if_content_defined(content, content_tempfile) @@ -179,15 +180,15 @@ class ActionModule(ActionBase): result['msg'] = "can not use content with a dir as dest" return result else: - # Append the relative source location to the destination and retry remote_checksum + # Append the relative source location to the destination and get remote stats again dest_file = self._connection._shell.join_path(dest, source_rel) - remote_checksum = self._remote_checksum(dest_file, all_vars=task_vars) + dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow) - if remote_checksum != '1' and not force: + if not dest_status['exists'] and not force: # remote_file does not exist so continue to next iteration. continue - if local_checksum != remote_checksum: + if local_checksum != dest_status['checksum']: # The checksums don't match and we will change or error out. changed = True diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index d8339e57b90..c5c98861fb9 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -34,23 +34,18 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True def get_checksum(self, dest, all_vars, try_directory=False, source=None): - remote_checksum = self._remote_checksum(dest, all_vars=all_vars) + try: + dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False) - if remote_checksum in ('0', '2', '3', '4'): - # Note: 1 means the file is not present which is fine; template - # will create it. 
3 means directory was specified instead of file - if try_directory and remote_checksum == '3' and source: + if dest_stat['exists'] and dest_stat['isdir'] and try_directory and source: base = os.path.basename(source) dest = os.path.join(dest, base) - remote_checksum = self.get_checksum(dest, all_vars=all_vars, try_directory=False) - if remote_checksum not in ('0', '2', '3', '4'): - return remote_checksum + dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False) - result = dict(failed=True, msg="failed to checksum remote file." - " Checksum error code: %s" % remote_checksum) - return result + except Exception as e: + return dict(failed=True, msg=to_bytes(e)) - return remote_checksum + return dest_stat['checksum'] def run(self, tmp=None, task_vars=None): ''' handler for template operations ''' From 6102a4b9b49a9f1f33ad1f7359dfd344dbbca8fa Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 16 Feb 2016 09:42:33 -0500 Subject: [PATCH 0644/1113] template also when only comments present --- lib/ansible/template/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bec34f81aa7..2b07bcae200 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -370,7 +370,10 @@ class Templar: ''' returns True if the data contains a variable pattern ''' - return self.environment.block_start_string in data or self.environment.variable_start_string in data + for marker in [self.environment.block_start_string, self.environment.variable_start_string, self.environment.comment_start_string]: + if marker in data: + return True + return False def _convert_bare_variable(self, variable, bare_deprecated): ''' From 327b27ba1c7813c758845c5f05c6d5649b738d3d Mon Sep 17 00:00:00 2001 From: Adam Hartz <ahartz1@gmail.com> Date: Tue, 16 Feb 2016 09:48:40 -0500 Subject: [PATCH 0645/1113] Update stated location of common_module.py --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index dc5b7e8f5ff..d8a0973b011 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -264,7 +264,7 @@ And failures are just as simple (where 'msg' is a required parameter to explain module.fail_json(msg="Something fatal happened") There are also other useful functions in the module class, such as module.sha1(path). See -lib/ansible/module_common.py in the source checkout for implementation details. +lib/ansible/executor/module_common.py in the source checkout for implementation details. Again, modules developed this way are best tested with the hacking/test-module script in the git source checkout. 
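The documentation hunk above (and the follow-up in the next patch) points readers at lib/ansible/module_utils/basic.py, where the AnsibleModule helpers such as exit_json(), fail_json() and sha1() live. As an illustration of that pattern only (not part of any patch in this series; the module name, the ``path`` option and the call to ``module.sha1()`` are assumptions made for the example), a minimal module skeleton might look like this::

    #!/usr/bin/python
    # Illustrative sketch of the exit_json/fail_json pattern described in
    # docsite/rst/developing_modules.rst; the 'path' option is an assumption
    # for the example, not an existing module interface.
    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(
            argument_spec=dict(
                path=dict(required=True, type='str'),
            ),
            supports_check_mode=True,
        )
        path = module.params['path']
        # module.sha1() is one of the helper methods mentioned above; it
        # returns None when the path does not exist.
        checksum = module.sha1(path)
        if checksum is None:
            module.fail_json(msg='%s does not exist' % path)
        module.exit_json(changed=False, checksum=checksum)

    if __name__ == '__main__':
        main()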
Because of the magic involved, this is really the only way the scripts From cebf127d685f06e43dab92b08f5c28067b480a7b Mon Sep 17 00:00:00 2001 From: Adam Hartz <ahartz1@gmail.com> Date: Tue, 16 Feb 2016 10:32:57 -0500 Subject: [PATCH 0646/1113] Change reference to basic.py, where the implementation details live --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index d8a0973b011..741ec5231bd 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -264,7 +264,7 @@ And failures are just as simple (where 'msg' is a required parameter to explain module.fail_json(msg="Something fatal happened") There are also other useful functions in the module class, such as module.sha1(path). See -lib/ansible/executor/module_common.py in the source checkout for implementation details. +lib/ansible/module_utils/basic.py in the source checkout for implementation details. Again, modules developed this way are best tested with the hacking/test-module script in the git source checkout. Because of the magic involved, this is really the only way the scripts From 665e158ec05d49b53e0d057ce5debaf5b500e981 Mon Sep 17 00:00:00 2001 From: brianlycett <brian.lycett@ontrackretail.co.uk> Date: Tue, 16 Feb 2016 16:40:44 +0000 Subject: [PATCH 0647/1113] Update YAMLSyntax.rst Updated to include a bit more depth in explaining the YAML, along with a link the Wikipedia page on YAML syntax. --- docsite/rst/YAMLSyntax.rst | 39 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 8189a6caf6c..4b2ec0535d8 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -42,6 +42,24 @@ A dictionary is represented in a simple ``key: value`` form (the colon must be f job: Developer skill: Elite +More complicated data structures are possible, such as lists of dictionaries, or dictionaries whose values are lists. Or a mix of both:: + + # Employee records + - martin: + name: Martin D'vloper + job: Developer + skills: + - python + - perl + - pascal + - tabitha: + name: Tabitha Bitumen + job: Developer + skills: + - lisp + - fortran + - erlang + Dictionaries and lists can also be represented in an abbreviated form if you really want to:: --- @@ -59,6 +77,17 @@ Ansible doesn't really use these too much, but you can also specify a boolean va likes_emacs: TRUE uses_cvs: false +Values can span multiple lines using *|* or *>* to include newlines or ignore them:: + + ignore_newlines: > + this is really a + single line of text + despite appearances + + include_newlines: | + exactly as you see + will appear these three + lines of poetry Let's combine what we learned so far in an arbitrary YAML example. This really has nothing to do with Ansible, but will give you a feel for the format:: @@ -75,9 +104,13 @@ This really has nothing to do with Ansible, but will give you a feel for the for - Strawberry - Mango languages: - ruby: Elite + perl: Elite python: Elite - dotnet: Lame + pascal: Lame + education: | + 4 GCSEs + 3 A-Levels + BSc in the Internet of Things That's all you really need to know about YAML to start writing `Ansible` playbooks. 
@@ -116,6 +149,8 @@ In these cases just use quotes:: YAML Lint (online) helps you debug YAML syntax if you are having problems `Github examples directory <https://github.com/ansible/ansible-examples>`_ Complete playbook files from the github project source + `Wikipedia YAML syntax reference <https://en.wikipedia.org/wiki/YAML>`_ + A good guide to YAML syntax `Mailing List <http://groups.google.com/group/ansible-project>`_ Questions? Help? Ideas? Stop by the list on Google Groups `irc.freenode.net <http://irc.freenode.net>`_ From 71dffbc28f0a1f3148d20412b25ae0727b51e4d9 Mon Sep 17 00:00:00 2001 From: Scott Buchanan <scott@eyefruit.com> Date: Tue, 16 Feb 2016 16:19:26 -0500 Subject: [PATCH 0648/1113] Fix leftover references to sudo instead of become Catching a few places that earlier PR #10788 missed. --- docsite/rst/playbooks_intro.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 55cd3359be6..e43dfd475a7 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -180,9 +180,9 @@ Support for running things as another user is also available (see :doc:`become`) --- - hosts: webservers remote_user: yourname - sudo: yes + become: yes -You can also use sudo on a particular task instead of the whole play:: +You can also use become on a particular task instead of the whole play:: --- - hosts: webservers From 56239ee347c8cf156466c06975055faeeca86abe Mon Sep 17 00:00:00 2001 From: George Christou <gechrr@gmail.com> Date: Wed, 17 Feb 2016 10:10:07 +0000 Subject: [PATCH 0649/1113] Make --diff colours configurable --- lib/ansible/constants.py | 3 +++ lib/ansible/plugins/callback/__init__.py | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index d277c717b54..6dad199e9b5 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -278,6 +278,9 @@ COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan' COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red') COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green') COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow') +COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green') +COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red') +COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan') # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index ce028dc2793..58904420af0 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -138,12 +138,12 @@ class CallbackBase: has_diff = False for line in differ: has_diff = True - if line.startswith('-'): - line = stringc(line, 'red') - elif line.startswith('+'): - line = stringc(line, 'green') + if line.startswith('+'): + line = stringc(line, C.COLOR_DIFF_ADD) + elif line.startswith('-'): + line = stringc(line, C.COLOR_DIFF_REMOVE) elif line.startswith('@@'): - line = stringc(line, 'cyan') + line = stringc(line, C.COLOR_DIFF_LINES) ret.append(line) if has_diff: ret.append('\n') From 4102812a698cc85f562175f849b3f4b053b93181 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Wed, 17 Feb 2016 11:13:59 +0000 
Subject: [PATCH 0650/1113] Remove spurious 'either', fix #14520 --- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 6d9b5977fc1..3f3f2e17d36 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -186,7 +186,7 @@ available to them. This can be very useful to keep your variables organized when file starts to be too big, or when you want to use :doc:`Ansible Vault<playbooks_vault>` on a part of a group's variables. Note that this only works on Ansible 1.4 or later. -Tip: In Ansible 1.2 or later the group_vars/ and host_vars/ directories can exist in either +Tip: In Ansible 1.2 or later the group_vars/ and host_vars/ directories can exist in the playbook directory OR the inventory directory. If both paths exist, variables in the playbook directory will override variables set in the inventory directory. From 9e31e33850a220ffda09d685d51b1eddb100116d Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 17 Feb 2016 14:15:11 +0000 Subject: [PATCH 0651/1113] minor bugfix that will catch connection errors in nxos This commit fixes a situation where connection errors would be caught but no useful information display. The connection error is now caught and emitted in a call to fail_json --- lib/ansible/module_utils/nxos.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index 8c681830a6d..e18aea47739 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -147,7 +147,8 @@ class Cli(object): try: self.shell.open(host, port=port, username=username, password=password) except Exception, exc: - self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) + msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + self.module.fail_json(msg=msg) def send(self, commands, encoding='text'): return self.shell.send(commands) @@ -188,7 +189,7 @@ class NetworkModule(AnsibleModule): def configure(self, commands): commands = to_list(commands) if self.params['transport'] == 'cli': - commands.insert(0, 'configure terminal') + commands.insert(0, 'configure') responses = self.execute(commands) responses.pop(0) else: From 1a072578c3cf87829ee1dd4801f8a5ffc02a11b1 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 17 Feb 2016 14:14:54 +0000 Subject: [PATCH 0652/1113] minor bugfix that will catch connection errors in junos This commit fixes a situation where connection errors would be caught but no useful information display. 
The connection error is now caught and emitted in a call to fail_json --- lib/ansible/module_utils/junos.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/junos.py b/lib/ansible/module_utils/junos.py index 4912d008ff0..85aad60aca2 100644 --- a/lib/ansible/module_utils/junos.py +++ b/lib/ansible/module_utils/junos.py @@ -47,7 +47,12 @@ class Cli(object): password = self.module.params['password'] self.shell = Shell() - self.shell.open(host, port=port, username=username, password=password) + + try: + self.shell.open(host, port=port, username=username, password=password) + except Exception, exc: + msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + self.module.fail_json(msg=msg) def send(self, commands): return self.shell.send(commands) From b72b14fdf2c0caa7cd4b6b4695c134f9af174752 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 17 Feb 2016 14:14:41 +0000 Subject: [PATCH 0653/1113] minor bugfix that will catch connection errors in iosxr This commit fixes a situation where connection errors would be caught but no useful information display. The connection error is now caught and emitted in a call to fail_json --- lib/ansible/module_utils/iosxr.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/iosxr.py b/lib/ansible/module_utils/iosxr.py index 7ca360c5efd..e2c7c983916 100644 --- a/lib/ansible/module_utils/iosxr.py +++ b/lib/ansible/module_utils/iosxr.py @@ -49,7 +49,12 @@ class Cli(object): password = self.module.params['password'] self.shell = Shell() - self.shell.open(host, port=port, username=username, password=password) + + try: + self.shell.open(host, port=port, username=username, password=password) + except Exception, exc: + msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + self.module.fail_json(msg=msg) def send(self, commands): return self.shell.send(commands) From 0d3c0515c0fa9a694f3ad631d1df1ace3ee789ee Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 17 Feb 2016 14:14:24 +0000 Subject: [PATCH 0654/1113] minor bugfix that will catch connection errors in ios This commit fixes a situation where connection errors would be caught but no useful information display. The connection error is now caught and emitted in a call to fail_json --- lib/ansible/module_utils/ios.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index 3340213fa81..00d746f0e0f 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -57,7 +57,8 @@ class Cli(object): try: self.shell.open(host, port=port, username=username, password=password) except Exception, exc: - self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) + msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + self.module.fail_json(msg=msg) def authorize(self): passwd = self.module.params['auth_pass'] From 91f3558c64e64c56ac37f284ae3fc0ab7e261cc5 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 17 Feb 2016 14:12:50 +0000 Subject: [PATCH 0655/1113] minor bugfix that will catch connection errors in eos This commit fixes a situation where connection errors would be caught but no useful information display. 
The connection error is now caught and emitted in a call to fail_json --- lib/ansible/module_utils/eos.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index 5e0dfefc353..71fa8802b66 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -127,7 +127,8 @@ class Cli(object): try: self.shell.open(host, port=port, username=username, password=password) except Exception, exc: - self.module.fail_json('Failed to connect to {0}:{1} - {2}'.format(host, port, str(exc))) + msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + self.module.fail_json(msg=msg) def authorize(self): passwd = self.module.params['auth_pass'] From 439baf004e372e9ed818b15e0a191333dc11705f Mon Sep 17 00:00:00 2001 From: b4ldr <github@johnbond.org> Date: Wed, 17 Feb 2016 18:21:12 +0000 Subject: [PATCH 0656/1113] update uptime script to use version 2.0 of the api --- examples/scripts/uptime.py | 103 ++++++++++++++++++++++++++++--------- 1 file changed, 80 insertions(+), 23 deletions(-) diff --git a/examples/scripts/uptime.py b/examples/scripts/uptime.py index 167dedcede7..cf3c604f810 100755 --- a/examples/scripts/uptime.py +++ b/examples/scripts/uptime.py @@ -1,31 +1,88 @@ #!/usr/bin/python -# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com> -# example of getting the uptime of all hosts, 10 at a time +from collections import namedtuple +from ansible.parsing.dataloader import DataLoader +from ansible.vars import VariableManager +from ansible.inventory import Inventory +from ansible.playbook.play import Play +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.plugins.callback import CallbackBase -import ansible.runner -import sys +# Creat a callback object so we can capture the output +class ResultsCollector(CallbackBase): -# construct the ansible runner and execute on all hosts -results = ansible.runner.Runner( - pattern='*', forks=10, - module_name='command', module_args='/usr/bin/uptime', -).run() + def __init__(self, *args, **kwargs): + super(ResultsCollector, self).__init__(*args, **kwargs) + self.host_ok = {} + self.host_unreachable = {} + self.host_failed = {} -if results is None: - print "No hosts found" - sys.exit(1) + def v2_runner_on_unreachable(self, result): + self.host_unreachable[result._host.get_name()] = result -print "UP ***********" -for (hostname, result) in results['contacted'].items(): - if not 'failed' in result: - print "%s >>> %s" % (hostname, result['stdout']) + def v2_runner_on_ok(self, result, *args, **kwargs): + self.host_ok[result._host.get_name()] = result -print "FAILED *******" -for (hostname, result) in results['contacted'].items(): - if 'failed' in result: - print "%s >>> %s" % (hostname, result['msg']) + def v2_runner_on_failed(self, result, *args, **kwargs): + self.host_failed[result._host.get_name()] = result -print "DOWN *********" -for (hostname, result) in results['dark'].items(): - print "%s >>> %s" % (hostname, result) +def main(): + host_list = ['localhost', 'www.example.com', 'www.google.com'] + Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', + 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', + 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) + + # initialize needed objects + variable_manager = VariableManager() + loader = DataLoader() + options = Options(connection='smart', module_path='/usr/share/ansible', forks=100, + 
remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, + sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, + become_user=None, verbosity=None, check=False) + + passwords = dict() + + # create inventory and pass to var manager + inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list) + variable_manager.set_inventory(inventory) + + # create play with tasks + play_source = dict( + name = "Ansible Play", + hosts = host_list, + gather_facts = 'no', + tasks = [ dict(action=dict(module='command', args=dict(cmd='/usr/bin/uptime'))) ] + ) + play = Play().load(play_source, variable_manager=variable_manager, loader=loader) + + # actually run it + tqm = None + callback = ResultsCollector() + try: + tqm = TaskQueueManager( + inventory=inventory, + variable_manager=variable_manager, + loader=loader, + options=options, + passwords=passwords, + ) + tqm._stdout_callback = callback + result = tqm.run(play) + finally: + if tqm is not None: + tqm.cleanup() + + print "UP ***********" + for host, result in callback.host_ok.items(): + print '{} >>> {}'.format(host, result._result['stdout']) + + print "FAILED *******" + for host, result in callback.host_failed.items(): + print '{} >>> {}'.format(host, result._result['msg']) + + print "DOWN *********" + for host, result in callback.host_unreachable.items(): + print '{} >>> {}'.format(host, result._result['msg']) + +if __name__ == '__main__': + main() From 588045cc4615c6c9d8522f5dfa346b5feffeb4e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Wed, 17 Feb 2016 21:30:34 +0100 Subject: [PATCH 0657/1113] Improve message content to get more hint about the raised error. --- lib/ansible/template/vars.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py index d55169368ac..badf93b1e86 100644 --- a/lib/ansible/template/vars.py +++ b/lib/ansible/template/vars.py @@ -21,6 +21,7 @@ __metaclass__ = type from ansible.compat.six import iteritems from jinja2.utils import missing +from ansible.utils.unicode import to_unicode __all__ = ['AnsibleJ2Vars'] @@ -83,7 +84,12 @@ class AnsibleJ2Vars: if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars): return variable else: - return self._templar.template(variable) + value = None + try: + value = self._templar.template(variable) + except Exception as e: + raise type(e)(to_unicode(variable) + ': ' + e.message) + return value def add_locals(self, locals): ''' From 517acb57738800ae72a140044ecb09325a461f55 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 17 Feb 2016 16:39:42 -0500 Subject: [PATCH 0658/1113] Renaming ISSUE_TEMPLATE.md -> issue_template.md --- ISSUE_TEMPLATE.md => issue_template.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename ISSUE_TEMPLATE.md => issue_template.md (100%) diff --git a/ISSUE_TEMPLATE.md b/issue_template.md similarity index 100% rename from ISSUE_TEMPLATE.md rename to issue_template.md From 90fb809f6298d475ee27f9b2f81bb96fa5904811 Mon Sep 17 00:00:00 2001 From: Marcus Furlong <furlongm@gmail.com> Date: Thu, 18 Feb 2016 14:36:52 +1100 Subject: [PATCH 0659/1113] fix ansible_os_family fact on openSUSE Leap ansible_os_family on openSUSE Leap has the wrong value: "ansible_os_family": "openSUSE Leap", It should be: "ansible_os_family": "Suse", This change fixes that by adding the relevant key and ensuring that dict lookups replace ' 
' with '_' so the key does not contain a space. --- lib/ansible/module_utils/facts.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4db79214d3f..9b94ecd3db5 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -274,7 +274,7 @@ class Facts(object): Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris', SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin', - FreeBSD = 'FreeBSD', HPUX = 'HP-UX' + FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse' ) # TODO: Rewrite this to use the function references in a dict pattern @@ -511,8 +511,9 @@ class Facts(object): machine_id = machine_id.split('\n')[0] self.facts["machine_id"] = machine_id self.facts['os_family'] = self.facts['distribution'] - if self.facts['distribution'] in OS_FAMILY: - self.facts['os_family'] = OS_FAMILY[self.facts['distribution']] + distro = self.facts['distribution'].replace(' ', '_') + if distro in OS_FAMILY: + self.facts['os_family'] = OS_FAMILY[distro] def get_cmdline(self): data = get_file_content('/proc/cmdline') From 275bd1b1217d8330a2d70c134f8b33f067cd4f31 Mon Sep 17 00:00:00 2001 From: Maxim Burgerhout <maxim@redhat.com> Date: Thu, 18 Feb 2016 08:29:58 +0100 Subject: [PATCH 0660/1113] Fix a typo in man1/ansible --- docs/man/man1/ansible.1.asciidoc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in index 4cabe6c1dce..191b9be8aa6 100644 --- a/docs/man/man1/ansible.1.asciidoc.in +++ b/docs/man/man1/ansible.1.asciidoc.in @@ -104,7 +104,7 @@ Alternatively you can use a comma separated list of hosts or single host with tr *-l* 'SUBSET', *--limit=*'SUBSET':: Further limits the selected host/group patterns. -You can prefix it with '~' to indicate that the patter in a regex. +You can prefix it with '~' to indicate that the pattern is a regex. *--list-hosts*:: From 6779f91b88bb3486233db5c3ea06677c84a8e24f Mon Sep 17 00:00:00 2001 From: Marc Pujol <mpujol@iiia.csic.es> Date: Thu, 18 Feb 2016 09:15:07 +0100 Subject: [PATCH 0661/1113] Avoid duplicate /bin/lsblk calls in the setup module. The setup module calls /bin/lsblk once for each device appearing in the /etc/mtab file. However, the same device appears there mutliple times when the system uses bind-mounts. As a result, /bin/lsblk is being called repeatedly to get the uuid of the same device. On a system with many mounts, this leads to a TimeoutError in the get_mount_facts function of the setup module as described in #14551. 
Fixes #14551 --- lib/ansible/module_utils/facts.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4db79214d3f..dc0d9ac1553 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1037,6 +1037,7 @@ class LinuxHardware(Hardware): @timeout(10) def get_mount_facts(self): + uuids = dict() self.facts['mounts'] = [] mtab = get_file_content('/etc/mtab', '') for line in mtab.split('\n'): @@ -1052,13 +1053,17 @@ class LinuxHardware(Hardware): except OSError: continue - uuid = 'NA' - lsblkPath = module.get_bin_path("lsblk") - if lsblkPath: - rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) + if fields[0] in uuids: + uuid = uuids[fields[0]] + else: + uuid = 'NA' + lsblkPath = module.get_bin_path("lsblk") + if lsblkPath: + rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) - if rc == 0: - uuid = out.strip() + if rc == 0: + uuid = out.strip() + uuids[fields[0]] = uuid self.facts['mounts'].append( {'mount': fields[1], From 2b5fffa751c0a6ec364e66cc648f456d28b3a8a7 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 01:41:23 -0800 Subject: [PATCH 0662/1113] updated to use to_unicode as per feedback --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 4ed496fbe98..7c8d6a1bb72 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -331,7 +331,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): else: x = remote_stat['checksum'] # if 1, file is missing except AnsibleError as e: - errormsg = to_bytes(e) + errormsg = to_unicode(e) if errormsg.endswith('Permission denied'): x = "2" # cannot read file elif errormsg.endswith('MODULE FAILURE'): From 0a4642fcc2824da7c8430e7b80af7321936ed3ea Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 02:01:37 -0800 Subject: [PATCH 0663/1113] added examples for new diff color configs --- examples/ansible.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index b357738b39c..076320d7230 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -273,3 +273,6 @@ #unreachable = red #ok = green #changed = yellow +#diff_add = green +#diff_remove = red +#diff_lines = cyan From 6012646d8c5301b3b5fa63d58e5269af08275449 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 2 Dec 2015 20:36:21 -0800 Subject: [PATCH 0664/1113] added playbook and options info to callbacks will display on certain verbosity levels, both playbook/file info and non empty options with which it's running. 
avoid errors when not using CLI classes --- lib/ansible/plugins/callback/__init__.py | 10 ++++++++++ lib/ansible/plugins/callback/default.py | 13 +++++++++++++ 2 files changed, 23 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 58904420af0..ce0361700c2 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -39,6 +39,11 @@ except ImportError: __all__ = ["CallbackBase"] +try: + from __main__ import cli +except ImportError: + # using API w/o cli + cli = False class CallbackBase: @@ -54,6 +59,11 @@ class CallbackBase: else: self._display = global_display + if cli: + self._options = cli.options + else: + self._options = None + if self._display.verbosity >= 4: name = getattr(self, 'CALLBACK_NAME', 'unnamed') ctype = getattr(self, 'CALLBACK_TYPE', 'old') diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index df32112cb39..9d6c9d8c5b4 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -230,3 +230,16 @@ class CallbackModule(CallbackBase): self._display.display("", screen_only=True) + def v2_playbook_on_start(self, playbook): + if self._display.verbosity > 1: + from os.path import basename + self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name)) + + if self._display.verbosity > 3: + if self._options is not None: + for option in dir(self._options): + if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']: + continue + val = getattr(self._options,option) + if val: + self._display.vvvv('%s: %s' % (option,val)) From fcf9cdde016a025be60bc172de2d1684e371206c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 02:15:30 -0800 Subject: [PATCH 0665/1113] updated diff info, added slack callback also added note about callback new info access --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c379fb31ccc..668d0f1f68e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ Ansible Changes By Release ###Major Changes: -* added facility for modules to send back 'diff' for display when ansible is called with --diff, file, puppet and other module already implement this +* added facility for modules to send back 'diff' for display when ansible is called with --diff, updated several modules to return this info ####New Modules: * aws: ec2_vol_facts @@ -17,6 +17,13 @@ Ansible Changes By Release ####New Filters: * extract +####New Callbacks: +* slack + +###Minor Changes: + +* callbacks now have access to the options with which the CLI was called + ## 2.0 "Over the Hills and Far Away" ###Major Changes: From d54d9dabe955b3982711c8efbd4fc1548ef59121 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 02:21:58 -0800 Subject: [PATCH 0666/1113] added specific info to deprecation message should now show actual variable that is undefined fixes #14526 --- lib/ansible/executor/task_executor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2af5908a275..dd3f69bd0f6 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -35,7 +35,7 @@ from ansible.playbook.task import Task from ansible.template import Templar from ansible.utils.encrypt import key_for_hostname from 
ansible.utils.listify import listify_lookup_plugin_terms -from ansible.utils.unicode import to_unicode +from ansible.utils.unicode import to_unicode, to_bytes from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var try: @@ -185,7 +185,7 @@ class TaskExecutor: loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: loop_terms = [] - display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.") + display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e)) items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) From 905b156133b200b5df5ff45e2a665e53990030d3 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Thu, 18 Feb 2016 08:53:40 +0530 Subject: [PATCH 0667/1113] Edit the ISSUE_TEMPLATE The old template was effusive at the expense of making the text harder to read and easier to miss things in. This one is more direct, and easy to scan quickly. --- issue_template.md | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/issue_template.md b/issue_template.md index 094501db906..f20486fe6e0 100644 --- a/issue_template.md +++ b/issue_template.md @@ -1,39 +1,47 @@ ##### Issue Type: -Can you help us out in labelling this by telling us what kind of ticket this this? You can say: - - Bug Report - - Feature Idea - - Feature Pull Request - - New Module Pull Request - - Bugfix Pull Request - - Documentation Report - - Docs Pull Request +Please pick one and delete the rest: + - Bug Report + - Feature Idea + - Feature Pull Request + - New Module Pull Request + - Bugfix Pull Request + - Documentation Report + - Docs Pull Request ##### Ansible Version: -Let us know what version of Ansible you are using. Please supply the verbatim output from running “ansible --version”. If you're filing a ticket on a version of Ansible which is not the latest, we'd greatly appreciate it if you could retest on the latest version first. We don't expect you to test against the development branch most of the time, but we may ask for that if you have cycles. Thanks! +Please paste the verbatim output from running “ansible --version”. ##### Ansible Configuration: -What have you changed about your Ansible installation? What configuration settings have you changed/added/removed? Compare your /etc/ansible/ansible.cfg against a clean version from Github and let us know what's different. +Please mention any settings you've changed/added/removed in ansible.cfg +(or using the ANSIBLE_* environment variables). ##### Environment: -What OS are you running Ansible from and what OS are you managing? Examples include RHEL 5/6, Centos 5/6, Ubuntu 12.04/13.10, *BSD, Solaris. If this is a generic feature request or it doesn’t apply, just say “N/A”. Not all tickets may be about operating system related things and we understand that. +Please mention the OS you are running Ansible from, and the OS you are +managing, or say “N/A” for anything that isn't platform-specific. ##### Summary: -Please summarize your request in this space. 
You will earn bonus points for being succinct, but please add enough detail so we can understand the request. Thanks! +Please explain the problem briefly. ##### Steps To Reproduce: -If this is a bug ticket, please enter the steps you use to reproduce the problem in the space below. If this is a feature request, please enter the steps you would use to use the feature. If an example playbook is useful, please include a short reproducer inline, indented by four spaces. If a longer one is necessary, linking to one uploaded to gist.github.com would be great. Much appreciated! +For bugs, please show exactly how to reproduce the problem. For new +features, show how the feature would be used. + +You can include example playbooks inline (indented by four spaces) or +upload larger ones to gist.github.com and paste a link here. ##### Expected Results: -Please enter your expected results in this space. When running the steps supplied above in the previous section, what did you expect to happen? If showing example output, please indent your output by four spaces so it will render correctly in GitHub's viewer thingy. +What did you expect to happen when running the steps above? ##### Actual Results: -Please enter your actual results in this space. When running the steps supplied above, what actually happened? If you are showing example output, please indent your output by four spaces so it will render correctly in GitHub. Thanks again! +What actually happened? +(If you're pasting verbatim command output, remember to indent it by +four spaces so it will render correctly in Github.) From ff8b48ca65aa77d9baf8f9d9ea24ca9199a8d7a2 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Thu, 18 Feb 2016 09:10:12 +0530 Subject: [PATCH 0668/1113] Create a PULL_REQUEST_TEMPLATE Now that Github supports separate issue and PR templates, we can have a separate cut-down version for PRs without all the things we ask for in a new issue. The PR types are also removed from the ISSUE_TEMPLATE. --- PULL_REQUEST_TEMPLATE.md | 23 +++++++++++++++++++++++ issue_template.md | 4 ---- 2 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 PULL_REQUEST_TEMPLATE.md diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..bb64deb50df --- /dev/null +++ b/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,23 @@ +##### Issue Type: + +Please pick one and delete the rest: + - Feature Pull Request + - New Module Pull Request + - Bugfix Pull Request + - Docs Pull Request + +##### Ansible Version: + +Please paste the verbatim output from running “ansible --version”. + +##### Summary: + +Please describe the change and the reason for it. + +(If you're fixing an existing issue, please include "Fixes #nnn" in your +commit message and your description; but you should still explain what +the change does.) + +##### Example output: + +If necessary, paste example output (indented by four spaces) here. 
diff --git a/issue_template.md b/issue_template.md index f20486fe6e0..57ef3c90f64 100644 --- a/issue_template.md +++ b/issue_template.md @@ -3,11 +3,7 @@ Please pick one and delete the rest: - Bug Report - Feature Idea - - Feature Pull Request - - New Module Pull Request - - Bugfix Pull Request - Documentation Report - - Docs Pull Request ##### Ansible Version: From 2c723a135b2b9f9885ef9d440f70d1509c1fe881 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Thu, 18 Feb 2016 09:16:37 +0530 Subject: [PATCH 0669/1113] Rename issue_template.md back to ISSUE_TEMPLATE.md I can only assume that it was renamed because Github suddenly started to act on ISSUE_TEMPLATE; but that's what we want now, so back it goes. --- issue_template.md => ISSUE_TEMPLATE.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename issue_template.md => ISSUE_TEMPLATE.md (100%) diff --git a/issue_template.md b/ISSUE_TEMPLATE.md similarity index 100% rename from issue_template.md rename to ISSUE_TEMPLATE.md From 1dba6da40ffd0b5a687116171f62d48d5b6e72f8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 02:32:05 -0800 Subject: [PATCH 0670/1113] changed filtered to actionable cb plugin --- lib/ansible/plugins/callback/{filtered.py => actionable.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename lib/ansible/plugins/callback/{filtered.py => actionable.py} (98%) diff --git a/lib/ansible/plugins/callback/filtered.py b/lib/ansible/plugins/callback/actionable.py similarity index 98% rename from lib/ansible/plugins/callback/filtered.py rename to lib/ansible/plugins/callback/actionable.py index 094c37ed985..c0a22d4357a 100644 --- a/lib/ansible/plugins/callback/filtered.py +++ b/lib/ansible/plugins/callback/actionable.py @@ -25,7 +25,7 @@ class CallbackModule(CallbackModule_default): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'filtered' + CALLBACK_NAME = 'actionable' def __init__(self): self.super_ref = super(CallbackModule, self) From 93df09e3871009750e2cfafa743441674116f35c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 02:32:42 -0800 Subject: [PATCH 0671/1113] added actionable to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 668d0f1f68e..83f7b92e2ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Ansible Changes By Release * extract ####New Callbacks: +* actionable (only shows changed and failed) * slack ###Minor Changes: From 54cd2069e616ffd0d16e6d641daf5f1883a94492 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Thu, 18 Feb 2016 16:11:47 +0530 Subject: [PATCH 0672/1113] Use triple-backticks for verbatim output It's easier than indenting by four spaces, as @willthames points out. --- ISSUE_TEMPLATE.md | 16 +++++++++++----- PULL_REQUEST_TEMPLATE.md | 8 ++++++-- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 57ef3c90f64..e67be84a3c3 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -7,7 +7,9 @@ Please pick one and delete the rest: ##### Ansible Version: -Please paste the verbatim output from running “ansible --version”. +``` +(Paste verbatim output from “ansible --version” here) +``` ##### Ansible Configuration: @@ -28,8 +30,11 @@ Please explain the problem briefly. For bugs, please show exactly how to reproduce the problem. For new features, show how the feature would be used. 
-You can include example playbooks inline (indented by four spaces) or -upload larger ones to gist.github.com and paste a link here. +``` +(Paste example playbooks or commands here) +``` + +You can also paste gist.github.com links for larger files. ##### Expected Results: @@ -39,5 +44,6 @@ What did you expect to happen when running the steps above? What actually happened? -(If you're pasting verbatim command output, remember to indent it by -four spaces so it will render correctly in Github.) +``` +(Paste verbatim command output here) +``` diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index bb64deb50df..d532449adef 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -8,7 +8,9 @@ Please pick one and delete the rest: ##### Ansible Version: -Please paste the verbatim output from running “ansible --version”. +``` +(Paste verbatim output from “ansible --version” here) +``` ##### Summary: @@ -20,4 +22,6 @@ the change does.) ##### Example output: -If necessary, paste example output (indented by four spaces) here. +``` +(Paste verbatim command output here if necessary) +``` From 86b8dc0e7965c885268999648325985e52bd0084 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 18 Feb 2016 04:20:35 -0800 Subject: [PATCH 0673/1113] Add a configuration setting that allows the user to specify printing of task arguments in the header. Fixes #14554 --- docsite/rst/faq.rst | 4 ++-- docsite/rst/intro_configuration.rst | 28 +++++++++++++++++++++++++ examples/ansible.cfg | 10 +++++++++ lib/ansible/plugins/callback/default.py | 14 ++++++++++++- 4 files changed, 53 insertions(+), 3 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index e51a1751fee..a4b73b7b800 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -304,8 +304,6 @@ How do I keep secret data in my playbook? If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :doc:`playbooks_vault`. -.. _i_dont_see_my_question: - In Ansible 1.8 and later, if you have a task that you don't want to show the results or command given to it when using -v (verbose) mode, the following task or playbook attribute can be useful:: - name: secret task @@ -323,6 +321,8 @@ Though this will make the play somewhat difficult to debug. It's recommended th be applied to single tasks only, once a playbook is completed. +.. _i_dont_see_my_question: + I don't see my question here ++++++++++++++++++++++++++++ diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 51a1ad1e588..4e5d1a7c009 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -228,6 +228,34 @@ Allows disabling of deprecating warnings in ansible-playbook output:: Deprecation warnings indicate usage of legacy features that are slated for removal in a future release of Ansible. +.. _display_args_to_stdout + +display_args_to_stdout +====================== + +.. versionadded:: 2.1.0 + +By default, ansible-playbook will print a header for each task that is run to +stdout. These headers will contain the ``name:`` field from the task if you +specified one. If you didn't then ansible-playbook uses the task's action to +help you tell which task is presently running. Sometimes you run many of the +same action and so you want more information about the task to differentiate +it from others of the same action. 
If you set this variable to ``True`` in +the config then ansible-playbook will also include the task's arguments in the +header. + +This setting defaults to ``False`` because there is a chance that you have +sensitive values in your parameters and do not want those to be printed to +stdout:: + + display_args_to_stdout=False + +If you set this to ``True`` you should be sure that you have secured your +environment's stdout (no one can shoulder surf your screen and you aren't +saving stdout to an insecure file) or made sure that all of your playbooks +explicitly added the ``no_log: True`` parameter to tasks which have sensistive +values See :ref:`keep_secret_data` for more information. + .. _display_skipped_hosts: display_skipped_hosts diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 076320d7230..9c5b3bedc1f 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -98,6 +98,16 @@ # task is skipped. #display_skipped_hosts = True +# by default, if a task in a playbook does not include a name: field then +# ansible-playbook will construct a header that includes the task's action but +# not the task's args. This is a security feature because ansible cannot know +# if the *module* considers an argument to be no_log at the time that the +# header is printed. If your environment doesn't have a problem securing +# stdout from ansible-playbook (or you have manually specified no_log in your +# playbook on all of the tasks where you have secret information) then you can +# safely set this to True to get more informative messages. +#display_args_to_stdout = False + # by default (as of 1.3), Ansible will raise errors when attempting to dereference # Jinja2 variables that are not set in templates or action lines. Uncomment this line # to revert the behavior to pre-1.3. diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 9d6c9d8c5b4..ea7b46969ca 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -113,7 +113,19 @@ class CallbackModule(CallbackBase): self._display.banner("NO MORE HOSTS LEFT") def v2_playbook_on_task_start(self, task, is_conditional): - self._display.banner("TASK [%s]" % task.get_name().strip()) + args = '' + # args can be specified as no_log in several places: in the task or in + # the argument spec. We can check whether the task is no_log but the + # argument spec can't be because that is only run on the target + # machine and we haven't run it thereyet at this time. + # + # So we give people a config option to affect display of the args so + # that they can secure this if they feel that their stdout is insecure + # (shoulder surfing, logging stdout straight to a file, etc). 
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: + args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ' %s' % args + self._display.banner("TASK [%s%s]" % (task.get_name().strip(), args)) if self._display.verbosity > 2: path = task.get_path() if path: From fe09f7ee49d01cf0258852fcd7d455c3a904f507 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 05:09:42 -0800 Subject: [PATCH 0674/1113] clarify that requirements are on host that runs it fixes http://github.com/ansible/ansible-modules-core/issues/3061 --- hacking/templates/rst.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index eccae4cb777..f9a04d423d5 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -45,8 +45,8 @@ Aliases: @{ ','.join(aliases) }@ {% endif %} {% if requirements %} -Requirements ------------- +Requirements (on host that executes module) +------------------------------------------- {% for req in requirements %} * @{ req | convert_symbols_to_format }@ From 5552c049763c2f91f4f1f2f5b622408b563f423e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 06:58:43 -0800 Subject: [PATCH 0675/1113] allow skipping debug depending on verbosity --- lib/ansible/plugins/action/debug.py | 44 +++++++++++++++++------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index f07d8ea5d05..7728d45cc22 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -26,7 +26,7 @@ class ActionModule(ActionBase): ''' Print statements during execution ''' TRANSFERS_FILES = False - VALID_ARGS = set(['msg', 'var']) + VALID_ARGS = set(['msg', 'var', 'verbosity']) def run(self, tmp=None, task_vars=None): if task_vars is None: @@ -41,26 +41,34 @@ class ActionModule(ActionBase): result = super(ActionModule, self).run(tmp, task_vars) - if 'msg' in self._task.args: - result['msg'] = self._task.args['msg'] + verbosity = 0 + # get task verbosity + if 'verbosity' in self._task.args: + verbosity = int(self._task.args['verbosity']) - elif 'var' in self._task.args: - try: - results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False) - if results == self._task.args['var']: - raise AnsibleUndefinedVariable - except AnsibleUndefinedVariable: - results = "VARIABLE IS NOT DEFINED!" + if verbosity <= self._display.verbosity: + if 'msg' in self._task.args: + result['msg'] = self._task.args['msg'] - if type(self._task.args['var']) in (list, dict): - # If var is a list or dict, use the type as key to display - result[to_unicode(type(self._task.args['var']))] = results + elif 'var' in self._task.args: + try: + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True, bare_deprecated=False) + if results == self._task.args['var']: + raise AnsibleUndefinedVariable + except AnsibleUndefinedVariable: + results = "VARIABLE IS NOT DEFINED!" + + if type(self._task.args['var']) in (list, dict): + # If var is a list or dict, use the type as key to display + result[to_unicode(type(self._task.args['var']))] = results + else: + result[self._task.args['var']] = results else: - result[self._task.args['var']] = results - else: - result['msg'] = 'Hello world!' + result['msg'] = 'Hello world!' 
- # force flag to make debug output module always verbose - result['_ansible_verbose_always'] = True + # force flag to make debug output module always verbose + result['_ansible_verbose_always'] = True + else: + result['skipped'] = True return result From 5a57139d91c8bf02c420e8528f8544b9c2c30a87 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 18 Feb 2016 15:59:57 +0100 Subject: [PATCH 0676/1113] Improve efficiency of merge_hash This is related to #14559, but only the part for Ansible v2.0 This commit makes merging empty dicts, or equal dicts more efficient. I noticed that while debugging merge_hash a lot of merges related to empty dictionaries and sometimes also identical dictionaries. --- lib/ansible/utils/vars.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py index 4d44a068c20..73ba52b4b39 100644 --- a/lib/ansible/utils/vars.py +++ b/lib/ansible/utils/vars.py @@ -74,6 +74,12 @@ def merge_hash(a, b): """ _validate_mutable_mappings(a, b) + + # if a is empty or equal to b, return b + if a == {} or a == b: + return b.copy() + + # if b is empty the below unfolds quickly result = a.copy() # next, iterate over b keys and values From 66dcd8019682a549f4e6511107434d52f824fb38 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 07:03:51 -0800 Subject: [PATCH 0677/1113] added verbosity to debug to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 83f7b92e2ab..d9965780b49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ Ansible Changes By Release ###Minor Changes: * callbacks now have access to the options with which the CLI was called +* debug is now controlable with verbosity ## 2.0 "Over the Hills and Far Away" From 0f73fb0d6fce6c01e79a68267d184290408b429d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 08:56:25 -0800 Subject: [PATCH 0678/1113] better error messages when failing to decrypt --- lib/ansible/parsing/vault/__init__.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index dc30dd0ffbd..8ea80d1b07a 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -328,7 +328,10 @@ class VaultEditor: check_prereqs() ciphertext = self.read_data(filename) - plaintext = self.vault.decrypt(ciphertext) + try: + plaintext = self.vault.decrypt(ciphertext) + except AnsibleError as e: + raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename))) self.write_data(plaintext, output_file or filename, shred=False) def create_file(self, filename): @@ -348,7 +351,10 @@ class VaultEditor: check_prereqs() ciphertext = self.read_data(filename) - plaintext = self.vault.decrypt(ciphertext) + try: + plaintext = self.vault.decrypt(ciphertext) + except AnsibleError as e: + raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename))) if self.vault.cipher_name not in CIPHER_WRITE_WHITELIST: # we want to get rid of files encrypted with the AES cipher @@ -359,9 +365,12 @@ class VaultEditor: def plaintext(self, filename): check_prereqs() - ciphertext = self.read_data(filename) - plaintext = self.vault.decrypt(ciphertext) + + try: + plaintext = self.vault.decrypt(ciphertext) + except AnsibleError as e: + raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename))) return plaintext @@ -371,7 +380,10 @@ class 
VaultEditor: prev = os.stat(filename) ciphertext = self.read_data(filename) - plaintext = self.vault.decrypt(ciphertext) + try: + plaintext = self.vault.decrypt(ciphertext) + except AnsibleError as e: + raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename))) new_vault = VaultLib(new_password) new_ciphertext = new_vault.encrypt(plaintext) @@ -383,6 +395,7 @@ class VaultEditor: os.chown(filename, prev.st_uid, prev.st_gid) def read_data(self, filename): + try: if filename == '-': data = sys.stdin.read() @@ -471,7 +484,7 @@ class VaultFile(object): this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) if dec_data is None: - raise AnsibleError("Decryption failed") + raise AnsibleError("Failed to decrypt: %s" % self.filename) else: self.tmpfile.write(dec_data) return self.tmpfile From 844754b8e3cf5268a7a0a4d3169b41815d84036a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 18 Feb 2016 14:37:14 -0800 Subject: [PATCH 0679/1113] added missing config to constants.py fixes #14567 --- lib/ansible/constants.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6dad199e9b5..fc3cf14a7a2 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -237,6 +237,7 @@ DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', ' RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True) DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True) +DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, boolean=True) # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s') From 7f7536f7ad3d42e3ba20c0f2c3ccb63d64b47a3c Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 18 Feb 2016 16:39:31 -0600 Subject: [PATCH 0680/1113] Add json callback to be used in replace of the default stdout to print a JSON structure to stdout --- lib/ansible/plugins/callback/json.py | 83 ++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 lib/ansible/plugins/callback/json.py diff --git a/lib/ansible/plugins/callback/json.py b/lib/ansible/plugins/callback/json.py new file mode 100644 index 00000000000..f1012305d8a --- /dev/null +++ b/lib/ansible/plugins/callback/json.py @@ -0,0 +1,83 @@ +# (c) 2016, Matt Martz <matt@sivel.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import json + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'json' + + def __init__(self, display=None): + super(CallbackModule, self).__init__(display) + self.results = [] + + def _new_play(self, play): + return { + 'play': { + 'name': play.name, + 'id': str(play._uuid) + }, + 'tasks': [] + } + + def _new_task(self, task): + return { + 'task': { + 'name': task.name, + 'id': str(task._uuid) + }, + 'hosts': {} + } + + def v2_playbook_on_play_start(self, play): + self.results.append(self._new_play(play)) + + def v2_playbook_on_task_start(self, task, is_conditional): + self.results[-1]['tasks'].append(self._new_task(task)) + + def v2_runner_on_ok(self, result, **kwargs): + host = result._host + self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result + + def v2_playbook_on_stats(self, stats): + """Display info about playbook statistics""" + + hosts = sorted(stats.processed.keys()) + + summary = {} + for h in hosts: + s = stats.summarize(h) + summary[h] = s + + output = { + 'plays': self.results, + 'stats': summary + } + + print(json.dumps(output, indent=4, sort_keys=True)) + + v2_runner_on_failed = v2_runner_on_ok + v2_runner_on_unreachable = v2_runner_on_ok + v2_runner_on_skipped = v2_runner_on_ok From f9526b2ab22e2658d0f6ce5bb93a51cba9685851 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 18 Feb 2016 23:48:53 -0800 Subject: [PATCH 0681/1113] Missed constants.py in the commit for #14557 --- lib/ansible/constants.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index fc3cf14a7a2..9b0bf4b153c 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -225,6 +225,7 @@ ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOC ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default') ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, islist=True) +DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'DISPLAY_ARGS_TO_STDOUT', False, boolean=True) DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True) DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True) HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True) From 80ec66f64af0e904be2287a178fdf02e722dfe69 Mon Sep 17 00:00:00 2001 From: Will Thames <will@thames.id.au> Date: Fri, 19 Feb 2016 10:38:27 +0000 Subject: [PATCH 0682/1113] Proposal for auto updating roles Reflects discussion on ansible-devel. 
--- docs/proposals/auto-install-roles.md | 106 +++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 docs/proposals/auto-install-roles.md diff --git a/docs/proposals/auto-install-roles.md b/docs/proposals/auto-install-roles.md new file mode 100644 index 00000000000..cd2406a0f5b --- /dev/null +++ b/docs/proposals/auto-install-roles.md @@ -0,0 +1,106 @@ +# Auto Install Ansible roles + +*Author*: Will Thames <@willthames> + +*Date*: 19/02/2016 + +## Motivation + +To use the latest (or even a specific) version of a playbook with the +appropriate roles, the following steps are typically required: + +``` +git pull upstream branch +ansible-galaxy install -r path/to/rolesfile.yml -p path/to/rolesdir -f +ansible-playbook run-the-playbook.yml +``` + +The most likely step in this process to be forgotten is the middle step. While +we can improve processes and documentation to try and ensure that this step is +not skipped, we can improve ansible-playbook so that the step is not required. + +## Approaches + +### Approach 1: Specify rolesfile and rolesdir in playbook + +Provide new `rolesdir` and `rolesfile` keywords: + +``` +- hosts: application-env + become: True + rolesfile: path/to/rolesfile.yml + rolesdir: path/to/rolesdir + roles: + - roleA + - { role: roleB, tags: role_roleB } +``` + +Running ansible-playbook against such a playbook would cause the roles listed in +`rolesfile` to be installed in `rolesdir`. + +Add new configuration to allow default rolesfile, default rolesdir and +whether or not to auto update roles (defaulting to False) + +#### Advantages + +- Existing mechanism for roles management is maintained +- Playbooks are not polluted with roles 'meta' information (version, source) + +#### Disadvantage + +- Adds two new keywords +- Adds three new configuration variables for defaults + +### Approach 2: Allow rolesfile inclusion + +Allow the `roles` section to include a roles file: + +``` +- hosts: application-env + become: True + roles: + - include: path/to/rolesfile.yml +``` + +Running this playbook would cause the roles to be updated from the included +roles file. + +This would also be functionally equivalent to specifying the roles file +content within the playbook: + +``` +- hosts: application-env + become: True + roles: + - src: https://git.example.com/roleA.git + scm: git + version: 0.1 + - src: https://git.example.com/roleB.git + scm: git + version: 0.3 + tags: role_roleB +``` + +#### Advantages + +- The existing rolesfile mechanism is maintained +- Uses familiar inclusion mechanism + +#### Disadvantage + +- Separate playbooks would need separate rolesfiles. For example, a provision + playbook and upgrade playbook would likely have some overlap - currently + you can use the same rolesfile with ansible-galaxy so that the same + roles are available but only a subset of roles is used by the smaller + playbook. +- The roles file would need to be able to include playbook features such + as role tagging. +- New configuration defaults would likely still be required (and possibly + an override keyword for rolesdir and role auto update) + +## Conclusion + +The author's preferred approach is currently Approach 1. + +Feedback is requested to improve either approach, or provide further +approaches to solve this problem. 
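As an aside, both approaches above build on the rolesfile format that ``ansible-galaxy install -r`` already accepts. A minimal sketch of such a file, with purely illustrative URLs, versions and role names, might look like::

    # path/to/rolesfile.yml
    - src: https://git.example.com/roleA.git
      scm: git
      version: "0.1"
      name: roleA

    - src: https://git.example.com/roleB.git
      scm: git
      version: "0.3"
      name: roleB
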
From bbc855c218ac31d896259cd759e4d1b6fba5264e Mon Sep 17 00:00:00 2001 From: George Christou <gechrr@gmail.com> Date: Tue, 16 Feb 2016 10:31:40 +0000 Subject: [PATCH 0683/1113] Make --diff lines of context configurable --- lib/ansible/constants.py | 3 +++ lib/ansible/plugins/callback/__init__.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 9b0bf4b153c..116084cbd9f 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -284,6 +284,9 @@ COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD' COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red') COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan') +# diff +DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, integer=True) + # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index ce0361700c2..19a861c89b9 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -144,7 +144,7 @@ class CallbackBase: tofile=after_header, fromfiledate='', tofiledate='', - n=10) + n=C.DIFF_CONTEXT) has_diff = False for line in differ: has_diff = True From c4e4dd351e1e02975bbbd14146f68f59ed2229a8 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 19 Feb 2016 07:12:52 -0800 Subject: [PATCH 0684/1113] added json callback to chnglog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9965780b49..3889f124432 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ Ansible Changes By Release ####New Callbacks: * actionable (only shows changed and failed) * slack +* json ###Minor Changes: From c172a289e4dda023be702453df13b85557f23add Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Fri, 19 Feb 2016 10:32:34 -0500 Subject: [PATCH 0685/1113] light editing of variable precedence list wording QA had asked me a while ago to clean up the way the precedence list for 1.x was worded, as the intro from the list started with "then comes", as if something should preceed it. The comments from OxABAB were not helpful themselves, but his issue reminded me that this was on my to do list to make a little cleaner and clearer. Edits made to remove the "then comes" intros for each list line, to help with clarity. --- docsite/rst/playbooks_variables.rst | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 122c0ef9232..421b5507df6 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -586,7 +586,7 @@ in Ansible. Effectively registered variables are just like facts. .. _accessing_complex_variable_data: Accessing Complex Variable Data -``````````````````````````````` +```````````````````````````````` We already talked about facts a little higher up in the documentation. @@ -758,19 +758,20 @@ If multiple variables of the same name are defined in different places, they get .. 
include:: ansible_ssh_changes_note.rst -In 1.x the precedence is (last listed wins): +In 1.x, the precedence is as follows (with the last listed variables winning prioritization): - * then "role defaults", which are the most "defaulty" and lose in priority to everything. - * then come the variables defined in inventory - * then come the facts discovered about a system - * then comes "most everything else" (command line switches, vars in play, included vars, role vars, etc) - * then come connection variables (``ansible_user``, etc) + * "role defaults", which lose in priority to everything and are the most easily overridden + * variables defined in inventory + * facts discovered about a system + * "most everything else" (command line switches, vars in play, included vars, role vars, etc.) + * connection variables (``ansible_user``, etc.) * extra vars (``-e`` in the command line) always win -.. note:: In versions prior to 1.5.4, facts discovered about a system were in the "most everything else" category above. +.. note:: + In versions prior to 1.5.4, facts discovered about a system were in the "most everything else" category above. -In 2.x we have made the order of precedence more specific (last listed wins): +In 2.x, we have made the order of precedence more specific (with last listed variables winning prioritization): * role defaults [1]_ * inventory vars [2]_ @@ -787,7 +788,7 @@ In 2.x we have made the order of precedence more specific (last listed wins): * role and include vars * block vars (only for tasks in block) * task vars (only for the task) - * extra vars + * extra vars (always win precedence) Basically, anything that goes into "role defaults" (the defaults folder inside the role) is the most malleable and easily overridden. Anything in the vars directory of the role overrides previous versions of that variable in namespace. The idea here to follow is that the more explicit you get in scope, the more precedence it takes with command line ``-e`` extra vars always winning. Host and/or inventory variables can win over role defaults, but not explicit includes like the vars directory or an ``include_vars`` task. @@ -815,7 +816,7 @@ but they behave like other variables, so if you really want to override the remo .. _variable_scopes: Variable Scopes -``````````````` +```````````````` Ansible has 3 main scopes: From 11f081cb992b508e47dcfefdbc79c995b3212266 Mon Sep 17 00:00:00 2001 From: Sandra Wills <docschick@ansible.com> Date: Fri, 19 Feb 2016 10:35:43 -0500 Subject: [PATCH 0686/1113] minor typo fix missing "the" added to 2.x listing to match wording for 1.x --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 421b5507df6..e897d30dc9b 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -771,7 +771,7 @@ In 1.x, the precedence is as follows (with the last listed variables winning pri In versions prior to 1.5.4, facts discovered about a system were in the "most everything else" category above. 
-In 2.x, we have made the order of precedence more specific (with last listed variables winning prioritization): +In 2.x, we have made the order of precedence more specific (with the last listed variables winning prioritization): * role defaults [1]_ * inventory vars [2]_ From f1d2b9ea9e96dfe1e8c34398b336a3c3d6678cff Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 20 Feb 2016 14:33:53 -0500 Subject: [PATCH 0687/1113] added size to mount facts on all non linux OSs fixes #14528 --- lib/ansible/module_utils/facts.py | 35 +++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4842dfd13ae..d00e46f1091 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -771,6 +771,17 @@ class Facts(object): val = len(option_tokens) == 2 and option_tokens[1] or True self.facts['dns']['options'][option_tokens[0]] = val + def _get_mount_size_facts(self, mountpoint): + size_total = None + size_available = None + try: + statvfs_result = os.statvfs(mountpoint) + size_total = statvfs_result.f_bsize * statvfs_result.f_blocks + size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) + except OSError: + pass + return size_total, size_available + class Hardware(Facts): """ This is a generic Hardware subclass of Facts. This should be further @@ -1045,15 +1056,7 @@ class LinuxHardware(Hardware): if line.startswith('/'): fields = line.rstrip('\n').split() if(fields[2] != 'none'): - size_total = None - size_available = None - try: - statvfs_result = os.statvfs(fields[1]) - size_total = statvfs_result.f_bsize * statvfs_result.f_blocks - size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) - except OSError: - continue - + size_total, size_available = self._get_mount_size_facts(fields[2]) if fields[0] in uuids: uuid = uuids[fields[0]] else: @@ -1300,7 +1303,9 @@ class SunOSHardware(Hardware): if fstab: for line in fstab.split('\n'): fields = line.rstrip('\n').split('\t') - self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]}) + size_total, size_available = self._get_mount_size_facts(fields[1]) + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available}) + class OpenBSDHardware(Hardware): """ @@ -1350,7 +1355,9 @@ class OpenBSDHardware(Hardware): fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() if fields[1] == 'none' or fields[3] == 'xx': continue - self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + size_total, size_available = self._get_mount_size_facts(fields[1]) + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available}) + def get_memory_facts(self): # Get free memory. 
vmstat output looks like: @@ -1473,7 +1480,8 @@ class FreeBSDHardware(Hardware): if line.startswith('#') or line.strip() == '': continue fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() - self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + size_total, size_available = self._get_mount_size_facts(fields[1]) + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available}) def get_device_facts(self): sysdir = '/dev' @@ -1602,7 +1610,8 @@ class NetBSDHardware(Hardware): if line.startswith('#') or line.strip() == '': continue fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() - self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + size_total, size_available = self._get_mount_size_facts(fields[1]) + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available}) class AIX(Hardware): """ From d9246aacd0eb66946673582ffbb5fa0d57261434 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 20 Feb 2016 14:46:18 -0500 Subject: [PATCH 0688/1113] fixed mount sizes for linux --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index d00e46f1091..fd5729c7c0d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1056,7 +1056,7 @@ class LinuxHardware(Hardware): if line.startswith('/'): fields = line.rstrip('\n').split() if(fields[2] != 'none'): - size_total, size_available = self._get_mount_size_facts(fields[2]) + size_total, size_available = self._get_mount_size_facts(fields[1]) if fields[0] in uuids: uuid = uuids[fields[0]] else: From 439a385215fb11766a83542e2ee00d1e66b5b5a7 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 18 Feb 2016 15:17:42 -0600 Subject: [PATCH 0689/1113] Catch TypeError on join, and if caught just return a single item, or a list of many --- lib/ansible/template/__init__.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bec34f81aa7..edb34bb4bed 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -420,7 +420,13 @@ class Templar: if wantlist: ran = wrap_var(ran) else: - ran = UnsafeProxy(",".join(ran)) + try: + ran = UnsafeProxy(",".join(ran)) + except TypeError: + if isinstance(ran, list) and len(ran) == 1: + ran = wrap_var(ran[0]) + else: + ran = wrap_var(ran) return ran else: From cea10794fda5c40130bd34abb801fff1419aa501 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 21 Feb 2016 02:03:31 -0500 Subject: [PATCH 0690/1113] removed unused color var --- lib/ansible/plugins/callback/default.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index ea7b46969ca..b6d1d3f67cf 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -212,7 +212,6 @@ class CallbackModule(CallbackBase): def v2_playbook_on_include(self, included_file): msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 
C.COLOR_SKIP self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_stats(self, stats): From e35b1cf1549c38fc9f81481ac14ff3077ddb3d07 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 21 Feb 2016 16:22:11 -0500 Subject: [PATCH 0691/1113] show task path on vv and above --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index b6d1d3f67cf..6ef3352a1c4 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -126,7 +126,7 @@ class CallbackModule(CallbackBase): args = ', '.join(('%s=%s' % a for a in task.args.items())) args = ' %s' % args self._display.banner("TASK [%s%s]" % (task.get_name().strip(), args)) - if self._display.verbosity > 2: + if self._display.verbosity >= 2: path = task.get_path() if path: self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) From b59b5a286b3a0247a9a6218be9fd2f24fc318d6c Mon Sep 17 00:00:00 2001 From: Matthew Huxtable <matthew.huxtable@sparx.co.uk> Date: Mon, 22 Feb 2016 16:07:48 +0000 Subject: [PATCH 0692/1113] docsite: add missing closing square bracket --- docsite/rst/playbooks_delegation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index fa808abb65b..eb3c33d60df 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -147,7 +147,7 @@ In 2.0, the directive `delegate_facts` may be set to `True` to assign the task's setup: delegate_to: "{{item}}" delegate_facts: True - with_items: "{{groups['dbservers'}}" + with_items: "{{groups['dbservers']}}" The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers. This way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. From 283532e11d2fc8bec332cc82181082da67d36d52 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 22 Feb 2016 13:23:51 -0500 Subject: [PATCH 0693/1113] Updating IRC channel list in community doc --- docsite/rst/community.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 4afd6cdadae..97ca9a866c0 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -251,7 +251,12 @@ channel or the general project mailing list. IRC Channel ----------- -Ansible has an IRC channel #ansible on irc.freenode.net. +Ansible has several IRC channels on Freenode (irc.freenode.net): + +- #ansible - For general use questions and support. +- #ansible-devel - For discussions on developer topics and code related to features/bugs. +- #ansible-meeting - For public community meetings. We will generally announce these on one or more of the above mailing lists. +- #ansible-notices - Mostly bot output from things like Github, etc. Notes on Priority Flags ----------------------- From 078ebb0dec17675dbb0aaf9d656d2c0042988d46 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 22 Feb 2016 17:09:58 -0500 Subject: [PATCH 0694/1113] Don't re-inject become* variables as it causes problems when templating is involved Prior to 75b6f61, we strictly limited variables we re-injected. 
After that patch however, we re-injected everything which causes problems under certain circumstances. For now, we'll continue to filter out some properties of PlayContext for re-injection. Fixes #14352 --- lib/ansible/playbook/play_context.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index a7c333a5520..2ff06857f33 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -513,8 +513,14 @@ class PlayContext(Base): ''' for prop, var_list in MAGIC_VARIABLE_MAPPING.items(): - var_val = getattr(self, prop, None) - if var_val is not None: + try: + if 'become' in prop: + continue + + var_val = getattr(self, prop) for var_opt in var_list: if var_opt not in variables: variables[var_opt] = var_val + except AttributeError: + continue + From 3f95f163ffdecc79976602feb882f5d47c2d755d Mon Sep 17 00:00:00 2001 From: ogenstad <patrick@ogenstad.com> Date: Tue, 23 Feb 2016 11:17:24 +0100 Subject: [PATCH 0695/1113] Avoids authentication failed exception if user has private rsa keys under .ssh --- lib/ansible/module_utils/shell.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index 9a63c9821d2..b9e798603c7 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -88,7 +88,8 @@ class Shell(object): self.errors.extend(CLI_ERRORS_RE) def open(self, host, port=22, username=None, password=None, - timeout=10, key_filename=None, pkey=None, look_for_keys=None): + timeout=10, key_filename=None, pkey=None, look_for_keys=None, + allow_agent=False): self.ssh = paramiko.SSHClient() self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) @@ -100,7 +101,7 @@ class Shell(object): self.ssh.connect(host, port=port, username=username, password=password, timeout=timeout, look_for_keys=look_for_keys, pkey=pkey, - key_filename=key_filename) + key_filename=key_filename, allow_agent=allow_agent) self.shell = self.ssh.invoke_shell() self.shell.settimeout(10) @@ -199,4 +200,3 @@ def get_cli_connection(module): module.fail_json(msg='socket timed out') return cli - From b432faa39563278ea00806c264b37e66457baf2c Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Tue, 23 Feb 2016 12:15:24 +0100 Subject: [PATCH 0696/1113] Put the advice to the user as comments in the template Most issues include parts of the advice from the template, which adds noise to tickets. E.g. A lot of tickets include the text "Please pick one and delete the rest:" By adding the advice to the user as comments (<!--- comment -->) we achieve two important things: 1. The advice does not end up in the actual issue ticket or pull request 2. 
It is easier for the user to differentiate its own input and the original advice (And my help them to clean up the advice as well, which in fact is now no longer necessary) --- ISSUE_TEMPLATE.md | 26 ++++++++++++++++---------- PULL_REQUEST_TEMPLATE.md | 14 ++++++++------ 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index e67be84a3c3..17c0a14648b 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,6 +1,6 @@ ##### Issue Type: -Please pick one and delete the rest: +<!--- Please pick one and delete the rest: --> - Bug Report - Feature Idea - Documentation Report @@ -8,42 +8,48 @@ Please pick one and delete the rest: ##### Ansible Version: ``` -(Paste verbatim output from “ansible --version” here) +<!--- Paste verbatim output from “ansible --version” here --> ``` ##### Ansible Configuration: -Please mention any settings you've changed/added/removed in ansible.cfg +<!--- +Please mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). +--> ##### Environment: +<!--- Please mention the OS you are running Ansible from, and the OS you are -managing, or say “N/A” for anything that isn't platform-specific. +managing, or say “N/A” for anything that is not platform-specific. +--> ##### Summary: -Please explain the problem briefly. +<!--- Please explain the problem briefly --> ##### Steps To Reproduce: +<!--- For bugs, please show exactly how to reproduce the problem. For new features, show how the feature would be used. +--> ``` -(Paste example playbooks or commands here) +<!--- Paste example playbooks or commands here --> ``` -You can also paste gist.github.com links for larger files. +<!--- You can also paste gist.github.com links for larger files --> ##### Expected Results: -What did you expect to happen when running the steps above? +<!--- What did you expect to happen when running the steps above? --> ##### Actual Results: -What actually happened? +<!--- What actually happened? --> ``` -(Paste verbatim command output here) +<!--- Paste verbatim command output here --> ``` diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index d532449adef..3c348908ed3 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ ##### Issue Type: -Please pick one and delete the rest: +<!--- Please pick one and delete the rest: --> - Feature Pull Request - New Module Pull Request - Bugfix Pull Request @@ -9,19 +9,21 @@ Please pick one and delete the rest: ##### Ansible Version: ``` -(Paste verbatim output from “ansible --version” here) +<!--- Paste verbatim output from “ansible --version” here --> ``` ##### Summary: -Please describe the change and the reason for it. +<!--- Please describe the change and the reason for it --> -(If you're fixing an existing issue, please include "Fixes #nnn" in your +<!--- +If you are fixing an existing issue, please include "Fixes #nnn" in your commit message and your description; but you should still explain what -the change does.) +the change does. 
+--> ##### Example output: ``` -(Paste verbatim command output here if necessary) +<!-- Paste verbatim command output here if necessary --> ``` From 28b4f2a595bff7a1c3033a4e57c9a1c16e710097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Strahinja=20Kustudi=C4=87?= <kustodian@gmail.com> Date: Tue, 23 Feb 2016 14:38:47 +0100 Subject: [PATCH 0697/1113] Adds ANSIBLE_VAULT_PASSWORD_FILE to the documentation site --- docsite/rst/playbooks_vault.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 01836f796fd..f31d8fb9e91 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -104,6 +104,9 @@ Alternatively, passwords can be specified with a file or a script, the script ve The password should be a string stored as a single line in the file. +.. note:: + You can also set ``ANSIBLE_VAULT_PASSWORD_FILE`` environment variable, e.g. ``ANSIBLE_VAULT_PASSWORD_FILE=~/.vault_pass.txt`` and Ansible will automatically search for the password in that file. + If you are using a script instead of a flat file, ensure that it is marked as executable, and that the password is printed to standard output. If your script needs to prompt for data, prompts can be sent to standard error. This is something you may wish to do if using Ansible from a continuous integration system like Jenkins. From 7158eb489f1817adc01177f9d6d67417883e080b Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Fri, 19 Feb 2016 12:59:58 -0600 Subject: [PATCH 0698/1113] Add python info to facts --- lib/ansible/module_utils/facts.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index fd5729c7c0d..53c4133575f 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -166,6 +166,7 @@ class Facts(object): self.get_local_facts() self.get_env_facts() self.get_dns_facts() + self.get_python_facts() def populate(self): return self.facts @@ -782,6 +783,21 @@ class Facts(object): pass return size_total, size_available + def get_python_facts(self): + self.facts['python'] = { + 'version': { + 'major': sys.version_info[0], + 'minor': sys.version_info[1], + 'micro': sys.version_info[2], + 'releaselevel': sys.version_info[3], + 'serial': sys.version_info[4] + }, + 'version_info': list(sys.version_info), + 'executable': sys.executable, + 'type': sys.subversion[0] + } + + class Hardware(Facts): """ This is a generic Hardware subclass of Facts. 
This should be further From d9a207f24f46d76c3b05650b3e908c161786a2b9 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 23 Feb 2016 11:28:04 -0600 Subject: [PATCH 0699/1113] Add has_sslcontext fact as well --- lib/ansible/module_utils/facts.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 53c4133575f..e698d780ffb 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -44,6 +44,15 @@ try: except ImportError: HAVE_SELINUX=False +try: + # Check if we have SSLContext support + from ssl import create_default_context, SSLContext + del create_default_context + del SSLContext + HAS_SSLCONTEXT = True +except ImportError: + HAS_SSLCONTEXT = False + try: import json # Detect python-json which is incompatible and fallback to simplejson in @@ -794,7 +803,8 @@ class Facts(object): }, 'version_info': list(sys.version_info), 'executable': sys.executable, - 'type': sys.subversion[0] + 'type': sys.subversion[0], + 'has_sslcontext': HAS_SSLCONTEXT } From 3e9408e317c24bba448ea45c17a6c8c28543f6d6 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 23 Feb 2016 11:48:09 -0600 Subject: [PATCH 0700/1113] Add some additional integration tests that mix SSL verification and redirects --- .../integration/roles/test_uri/tasks/main.yml | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 4d8f9c7db09..234e32bcb50 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -153,6 +153,28 @@ that: - 'result.location|default("") == "http://httpbin.org/relative-redirect/1"' +- name: Check SSL with redirect + uri: + url: 'https://httpbin.org/redirect/2' + register: result + +- name: Assert SSL with redirect + assert: + that: + - 'result.url|default("") == "https://httpbin.org/get"' + +- name: redirect to bad SSL site + uri: + url: 'http://wrong.host.badssl.com' + register: result + ignore_errors: true + +- name: Ensure bad SSL site reidrect fails + assert: + that: + - result|failed + - '"wrong.host.badssl.com" in result.msg' + - name: test basic auth uri: url: 'http://httpbin.org/basic-auth/user/passwd' From 6cf6130468e477f3c6be58695045c3212053e80f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 13:12:38 -0500 Subject: [PATCH 0701/1113] Fixing/cleaning up do/until logic in TaskExecutor * Fixes bug where the task was not marked as failed if the number of retries were exceeded (#14461) * Reorganizing logic to be a bit cleaner, and so retrie messages are shown before sleeping (which makes way more sense) Fixes #14461 Fixes #14580 --- lib/ansible/executor/task_executor.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index dd3f69bd0f6..f96eb578808 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -409,10 +409,6 @@ class TaskExecutor: display.debug("starting attempt loop") result = None for attempt in range(retries): - if attempt > 0: - display.display("FAILED - RETRYING: %s (%d retries left). 
Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG) - result['attempts'] = attempt + 1 - display.debug("running the handler") try: result = self._handler.run(task_vars=variables) @@ -469,16 +465,21 @@ class TaskExecutor: _evaluate_failed_when_result(result) if attempt < retries - 1: + if retries > 1: + result['attempts'] = attempt + 1 cond = Conditional(loader=self._loader) cond.when = [ self._task.until ] if cond.evaluate_conditional(templar, vars_copy): break # no conditional check, or it failed, so sleep for the specified time + display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-(attempt+1), result), color=C.COLOR_DEBUG) time.sleep(delay) - - elif 'failed' not in result: - break + else: + if retries > 1: + # we ran out of attempts, so mark the result as failed + result['attempts'] = retries + result['failed'] = True # do the final update of the local variables here, for both registered # values and any facts which may have been created From 6eb4633b07eb677d0ff118c92e90cc54be9a0c2d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 23 Feb 2016 15:06:37 -0500 Subject: [PATCH 0702/1113] always return a checksum key, even if empty --- lib/ansible/plugins/action/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index f1be406f6b1..28eb6ffe6e7 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -311,6 +311,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): # empty might be matched, 1 should never match, also backwards compatible mystat['stat']['checksum'] = '1' + # happens sometimes when it is a dir and not on bsd + if not 'checksum' in mystat['stat']: + mystat['stat']['checksum'] = '' + return mystat['stat'] def _remote_checksum(self, path, all_vars): From e02b98274b60cdbc12ef4a4c74ae0f74207384e8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 15:07:06 -0500 Subject: [PATCH 0703/1113] issue callbacks per item and retry fails - now workers passes queue to task_executor so it can send back events per item and on retry attempt - updated result class to pass along events to strategy - base strategy updated to forward new events to callback - callbacks now remove 'items' on final result but process them directly when invoked per item - new callback method to deal with retry attempt messages (also now obeys nolog) - updated tests to match new signature of task_executor fixes #14558 fixes #14072 --- lib/ansible/executor/process/result.py | 13 +++++++++ lib/ansible/executor/process/worker.py | 1 + lib/ansible/executor/task_executor.py | 28 +++++++++++-------- lib/ansible/plugins/callback/__init__.py | 25 +++++------------ lib/ansible/plugins/callback/default.py | 33 ++++++++++++++--------- lib/ansible/plugins/strategy/__init__.py | 3 ++- test/units/executor/test_task_executor.py | 16 ++++++++++- 7 files changed, 74 insertions(+), 45 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index bb4c0dd0a39..7c75bbdfc21 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -104,6 +104,19 @@ class ResultProcess(multiprocessing.Process): time.sleep(0.0001) continue + # send callbacks for 'non final' results + if '_ansible_retry' in result._result: + self._send_result(('v2_playbook_retry', result)) + continue + elif 
'_ansible_item_result' in result._result: + if result.is_failed() or result.is_unreachable(): + self._send_result(('v2_playbook_item_on_failed', result)) + elif result.is_skipped(): + self._send_result(('v2_playbook_item_on_skipped', result)) + else: + self._send_result(('v2_playbook_item_on_ok', result)) + continue + clean_copy = strip_internal_keys(result._result) if 'invocation' in clean_copy: del clean_copy['invocation'] diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index 24b9b3e5e03..7aa355aab8c 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -113,6 +113,7 @@ class WorkerProcess(multiprocessing.Process): self._new_stdin, self._loader, self._shared_loader_obj, + self._rslt_q ).run() debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index f96eb578808..162c70a2851 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -30,6 +30,7 @@ from ansible.compat.six import iteritems, string_types from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure +from ansible.executor.task_result import TaskResult from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar @@ -60,7 +61,7 @@ class TaskExecutor: # the module SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS) - def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj): + def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, rslt_q): self._host = host self._task = task self._job_vars = job_vars @@ -69,6 +70,7 @@ class TaskExecutor: self._loader = loader self._shared_loader_obj = shared_loader_obj self._connection = None + self._rslt_q = rslt_q def run(self): ''' @@ -242,7 +244,9 @@ class TaskExecutor: # now update the result with the item info, and append the result # to the list of results res['item'] = item - #TODO: send item results to callback here, instead of all at the end + res['_ansible_item_result'] = True + + self._rslt_q.put(TaskResult(self._host, self._task, res), block=False) results.append(res) return results @@ -416,6 +420,9 @@ class TaskExecutor: return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") + # preserve no log + result["_ansible_no_log"] = self._play_context.no_log + # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: @@ -465,16 +472,18 @@ class TaskExecutor: _evaluate_failed_when_result(result) if attempt < retries - 1: - if retries > 1: - result['attempts'] = attempt + 1 cond = Conditional(loader=self._loader) cond.when = [ self._task.until ] if cond.evaluate_conditional(templar, vars_copy): break - - # no conditional check, or it failed, so sleep for the specified time - display.display("FAILED - RETRYING: %s (%d retries left). 
Result was: %s" % (self._task, retries-(attempt+1), result), color=C.COLOR_DEBUG) - time.sleep(delay) + else: + # no conditional check, or it failed, so sleep for the specified time + result['attempts'] = attempt + 1 + result['retries'] = retries + result['_ansible_retry'] = True + display.debug('Retrying task, attempt %d of %d' % (attempt + 1, retries)) + self._rslt_q.put(TaskResult(self._host, self._task, result), block=False) + time.sleep(delay) else: if retries > 1: # we ran out of attempts, so mark the result as failed @@ -506,9 +515,6 @@ class TaskExecutor: for k in ('ansible_host', ): result["_ansible_delegated_vars"][k] = delegated_vars.get(k) - # preserve no_log setting - result["_ansible_no_log"] = self._play_context.no_log - # and return display.debug("attempt loop complete, returning result") return result diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 19a861c89b9..3a33ddbc4f1 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -172,16 +172,8 @@ class CallbackBase: return item def _process_items(self, result): - for res in result._result['results']: - newres = self._copy_result_exclude(result, ['_result']) - res['item'] = self._get_item(res) - newres._result = res - if 'failed' in res and res['failed']: - self.v2_playbook_item_on_failed(newres) - elif 'skipped' in res and res['skipped']: - self.v2_playbook_item_on_skipped(newres) - else: - self.v2_playbook_item_on_ok(newres) + # just remove them as now they get handled by individual callbacks + del result._result['results'] def _clean_results(self, result, task_name): if 'changed' in result and task_name in ['debug']: @@ -346,15 +338,6 @@ class CallbackBase: if 'diff' in result._result: self.on_file_diff(host, result._result['diff']) - def v2_playbook_on_item_ok(self, result): - pass # no v1 - - def v2_playbook_on_item_failed(self, result): - pass # no v1 - - def v2_playbook_on_item_skipped(self, result): - pass # no v1 - def v2_playbook_on_include(self, included_file): pass #no v1 correspondance @@ -366,3 +349,7 @@ class CallbackBase: def v2_playbook_item_on_skipped(self, result): pass + + def v2_playbook_retry(self, result): + pass + diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 6ef3352a1c4..072eb5f4d25 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -51,6 +51,7 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) + else: if delegated_vars: self._display.display("fatal: [%s -> %s]: FAILED! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) @@ -159,24 +160,22 @@ class CallbackModule(CallbackBase): self._display.display(diff) def v2_playbook_item_on_ok(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': return elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() + msg = 'changed' color = C.COLOR_CHANGED else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() + msg = 'ok' color = C.COLOR_OK - msg += " => (item=%s)" % (result._result['item'],) + if delegated_vars: + msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) + else: + msg += ": [%s]" % result._host.get_name() + + msg += " => (item=%s)" % (self._get_item(result._result)) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) @@ -197,15 +196,17 @@ class CallbackModule(CallbackBase): # finally, remove the exception from the result so it's not shown every time del result._result['exception'] + msg = "failed: " if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) + msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) + msg += "[%s]" % (result._host.get_name()) + self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR) self._handle_warnings(result._result) def v2_playbook_item_on_skipped(self, result): - msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item']) + msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) self._display.display(msg, color=C.COLOR_SKIP) @@ -254,3 +255,9 @@ class CallbackModule(CallbackBase): val = getattr(self._options,option) if val: self._display.vvvv('%s: %s' % (option,val)) + + def v2_playbook_retry(self, result): + msg = "FAILED - RETRYING: %s (%d retries left)." 
% (result._task, result._result['retries'] - result._result['attempts']) + if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: + msg += "Result was: %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 29d67808765..8d40aaaefeb 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -329,7 +329,8 @@ class StrategyBase: self._variable_manager.set_nonpersistent_facts(target_host, facts) else: self._variable_manager.set_host_facts(target_host, facts) - + elif result[0].startswith('v2_playbook_item') or result[0] == 'v2_playbook_retry': + self._tqm.send_callback(result[0], result[1]) else: raise AnsibleError("unknown result message received: %s" % result[0]) diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py index 7135a39ae2a..b029f87114c 100644 --- a/test/units/executor/test_task_executor.py +++ b/test/units/executor/test_task_executor.py @@ -45,6 +45,7 @@ class TestTaskExecutor(unittest.TestCase): mock_shared_loader = MagicMock() new_stdin = None job_vars = dict() + mock_queue = MagicMock() te = TaskExecutor( host = mock_host, task = mock_task, @@ -53,6 +54,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = mock_shared_loader, + rslt_q = mock_queue, ) def test_task_executor_run(self): @@ -66,6 +68,7 @@ class TestTaskExecutor(unittest.TestCase): mock_play_context = MagicMock() mock_shared_loader = MagicMock() + mock_queue = MagicMock() new_stdin = None job_vars = dict() @@ -78,6 +81,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = mock_shared_loader, + rslt_q = mock_queue, ) te._get_loop_items = MagicMock(return_value=None) @@ -97,7 +101,7 @@ class TestTaskExecutor(unittest.TestCase): def test_task_executor_get_loop_items(self): fake_loader = DictDataLoader({}) - + mock_host = MagicMock() mock_task = MagicMock() @@ -111,6 +115,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = None job_vars = dict() + mock_queue = MagicMock() te = TaskExecutor( host = mock_host, @@ -120,6 +125,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = mock_shared_loader, + rslt_q = mock_queue, ) items = te._get_loop_items() @@ -142,6 +148,7 @@ class TestTaskExecutor(unittest.TestCase): mock_play_context = MagicMock() mock_shared_loader = MagicMock() + mock_queue = MagicMock() new_stdin = None job_vars = dict() @@ -154,6 +161,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = mock_shared_loader, + rslt_q = mock_queue, ) def _execute(variables): @@ -184,6 +192,7 @@ class TestTaskExecutor(unittest.TestCase): mock_play_context = MagicMock() mock_shared_loader = None + mock_queue = MagicMock() new_stdin = None job_vars = dict(pkg_mgr='yum') @@ -196,6 +205,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = mock_shared_loader, + rslt_q = mock_queue, ) # @@ -279,6 +289,7 @@ class TestTaskExecutor(unittest.TestCase): mock_connection._connect.return_value = None mock_action = MagicMock() + mock_queue = MagicMock() shared_loader = None new_stdin = None @@ -292,6 +303,7 @@ class 
TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = shared_loader, + rslt_q = mock_queue, ) te._get_connection = MagicMock(return_value=mock_connection) @@ -330,6 +342,7 @@ class TestTaskExecutor(unittest.TestCase): mock_connection = MagicMock() mock_action = MagicMock() + mock_queue = MagicMock() shared_loader = MagicMock() shared_loader.action_loader = action_loader @@ -345,6 +358,7 @@ class TestTaskExecutor(unittest.TestCase): new_stdin = new_stdin, loader = fake_loader, shared_loader_obj = shared_loader, + rslt_q = mock_queue, ) te._connection = MagicMock() From da02aba173d49eabc890a1e3fbd2765ad69e9de3 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 15:07:51 -0500 Subject: [PATCH 0704/1113] Don't inject PlayContext properties as variables if they're None Fixes bug introduced in 078ebb0 --- lib/ansible/playbook/play_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 2ff06857f33..34b9affade6 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -519,7 +519,7 @@ class PlayContext(Base): var_val = getattr(self, prop) for var_opt in var_list: - if var_opt not in variables: + if var_opt not in variables and var_val is not None: variables[var_opt] = var_val except AttributeError: continue From d74ea512871a0c24586a44c8141228d7d09e81f8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 15:18:40 -0500 Subject: [PATCH 0705/1113] Submodule update --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8d126bd8774..7162623e867 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8d126bd877444c9557b1671521516447cc557d3f +Subproject commit 7162623e8677953bbbc499940f5f7dd19dba680d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f6c5ed987f7..f5e798f13ca 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f6c5ed987f7f8ec20ad1d417b4a39ba6bbc5d7bc +Subproject commit f5e798f13ca13938db76cc3ede85f0a05b9fc578 From 34541b4e5e2bef09f3fab2d008481eb8b2efbece Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 23 Feb 2016 15:37:21 -0500 Subject: [PATCH 0706/1113] preserve no_log for async also --- lib/ansible/executor/task_executor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 162c70a2851..d9cb7170b33 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -441,6 +441,9 @@ class TaskExecutor: if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) + # ensure no log is preserved + result["_ansible_no_log"] = self._play_context.no_log + # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): if self._task.changed_when is not None: From 9de24a373522d1bef803c23e38fc989cd82852bf Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 16:25:43 -0500 Subject: [PATCH 0707/1113] Assert things against the recursive copy result to make sure it did something --- test/integration/roles/test_copy/tasks/main.yml | 4 ++++ 1 file 
changed, 4 insertions(+) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index edae89f56ad..8153ba1508a 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -117,6 +117,10 @@ register: recursive_copy_result - debug: var=recursive_copy_result +- name: assert that the recursive copy did something + assert: + that: + - "recursive_copy_result|changed" - name: check that a file in a directory was transferred stat: path={{output_dir}}/sub/subdir/bar.txt From 2db3f290ba81a0be7e1145c35aaeecfb431ebcbd Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 23 Feb 2016 16:29:57 -0500 Subject: [PATCH 0708/1113] Fix logic error when copying files recursively to a directory --- lib/ansible/plugins/action/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index f9cd4c59030..c50076c5430 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -184,7 +184,7 @@ class ActionModule(ActionBase): dest_file = self._connection._shell.join_path(dest, source_rel) dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow) - if not dest_status['exists'] and not force: + if dest_status['exists'] and not force: # remote_file does not exist so continue to next iteration. continue From 06977d5cb11599f9d1ad2ddd2ffa99852a78ad7e Mon Sep 17 00:00:00 2001 From: Etherdaemon <kaz.cheng@gmail.com> Date: Wed, 24 Feb 2016 16:41:30 +1000 Subject: [PATCH 0709/1113] Fixup boto3_conn as commit https://github.com/ansible/ansible/commit/6ea772931fba2151fb2fb86caab8f7be10cf5769 broke commit https://github.com/ansible/ansible/commit/27398131cf31eb7ca834a30ea2d8a871a937a377 --- lib/ansible/module_utils/ec2.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 7b93d9bb7e0..4fa7631f008 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -47,8 +47,6 @@ class AnsibleAWSError(Exception): def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): profile = params.pop('profile_name', None) - params['aws_session_token'] = params.pop('security_token', None) - params['verify'] = params.pop('validate_certs', None) if conn_type not in ['both', 'resource', 'client']: module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call') From 4160a3f40b8b4b45547b188473c93aaaa45eff51 Mon Sep 17 00:00:00 2001 From: brianlycett <brian.lycett@ontrackretail.co.uk> Date: Wed, 24 Feb 2016 15:06:21 +0000 Subject: [PATCH 0710/1113] Update YAMLSyntax.rst A brief explanation was added regarding multi-line values. --- docsite/rst/YAMLSyntax.rst | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 4b2ec0535d8..b8d19099eb3 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -77,17 +77,20 @@ Ansible doesn't really use these too much, but you can also specify a boolean va likes_emacs: TRUE uses_cvs: false -Values can span multiple lines using *|* or *>* to include newlines or ignore them:: +Values can span multiple lines using *|* or *>*. Spanning multiple lines using a *|* will include the newlines. 
Using a *>* will ignore newlines; it's used to make what would otherwise be a very long line easier to read and edit. +In either case the indentation will be ignored. +Examples are:: + + include_newlines: | + exactly as you see + will appear these three + lines of poetry ignore_newlines: > this is really a single line of text despite appearances - - include_newlines: | - exactly as you see - will appear these three - lines of poetry + Let's combine what we learned so far in an arbitrary YAML example. This really has nothing to do with Ansible, but will give you a feel for the format:: From 03c33053aa4a9952bd80985ff2e1754b3e6b0dbf Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 24 Feb 2016 10:58:40 -0500 Subject: [PATCH 0711/1113] clarified become options do not imply others fixes #14635 --- docsite/rst/become.rst | 8 +++++--- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 3a6d6960900..93c96d9a3c3 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -15,6 +15,8 @@ privilege escalation tools, which you probably already use or have configured, l and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), `pfexec` and others. +.. note:: Setting any var or directive makes no implications on the values of the other related directives, i.e. setting become_user does not set become. + Directives ----------- @@ -24,7 +26,7 @@ become set to 'true'/'yes' to activate privilege escalation. become_user - set to user with desired privileges, the user you 'become', NOT the user you login as. + set to user with desired privileges, the user you 'become', NOT the user you login as. Does NOT imply `become: yes`, to allow it to be set at host level. become_method at play or task level overrides the default method set in ansible.cfg, set to 'sudo'/'su'/'pbrun'/'pfexec'/'doas' @@ -41,7 +43,7 @@ ansible_become_method allows to set privilege escalation method ansible_become_user - allows to set the user you become through privilege escalation + allows to set the user you become through privilege escalation, does not imply `ansible_become: True` ansible_become_pass allows you to set the privilege escalation password @@ -61,7 +63,7 @@ New command line options valid choices: [ sudo | su | pbrun | pfexec | doas ] --become-user=BECOME_USER - run operations as this user (default=root) + run operations as this user (default=root), does not imply --become/-b For those from Pre 1.9 , sudo and su still work! 
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7162623e867..8d126bd8774 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7162623e8677953bbbc499940f5f7dd19dba680d +Subproject commit 8d126bd877444c9557b1671521516447cc557d3f diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f5e798f13ca..f6c5ed987f7 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f5e798f13ca13938db76cc3ede85f0a05b9fc578 +Subproject commit f6c5ed987f7f8ec20ad1d417b4a39ba6bbc5d7bc From 66756078f3ec18d982f1775007eca423804d0121 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 24 Feb 2016 10:51:16 -0500 Subject: [PATCH 0712/1113] Updated auto-install-roles proposal. --- docs/proposals/auto-install-roles.md | 57 ++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/docs/proposals/auto-install-roles.md b/docs/proposals/auto-install-roles.md index cd2406a0f5b..b63349bb302 100644 --- a/docs/proposals/auto-install-roles.md +++ b/docs/proposals/auto-install-roles.md @@ -15,9 +15,11 @@ ansible-galaxy install -r path/to/rolesfile.yml -p path/to/rolesdir -f ansible-playbook run-the-playbook.yml ``` -The most likely step in this process to be forgotten is the middle step. While -we can improve processes and documentation to try and ensure that this step is -not skipped, we can improve ansible-playbook so that the step is not required. +### Problems + +- The most likely step in this process to be forgotten is the middle step. While we can improve processes and documentation to try and ensure that this step is not skipped, we can improve ansible-playbook so that the step is not required. +- Ansible-galaxy does ot sufficiently handle versioning. +- There is not a consistent format for specifying a role in a playbook or a dependent role in meta/main.yml. ## Approaches @@ -98,9 +100,50 @@ content within the playbook: - New configuration defaults would likely still be required (and possibly an override keyword for rolesdir and role auto update) + +### Approach 3: + +*Author*: chouseknecht<@chouseknecht> + +*Date*: 24/02/2016 + +This is a combination of ideas taken from IRC, the ansible development group, and conversations at the recent contributor's summit. It also incorporates most of the ideas from Approach 1 (above) with two notable texceptions: 1) it elmintates maintaing a roles file (or what we think of today as requirements.yml); and 2) it does not include the definition of rolesdir in the playbook. + +Here's the approach: + +- Share the role install logic between ansible-playbook and ansible-galaxy so that ansible-playbook can resolve and install missing roles at playbook run time simply by evaluating the playbook. +- Ansible-galaxy installs or preloads roles also by examining a playbook. +- Deprecate support for requirements.yaml (the two points above make it unnecessary). +- Make ansible-playbook auto-downloading of roles configurable in ansible.cfg. In certain circumstance it may be desirable to disable auto-download. +- Provide one format for specifying a role whether in a playbook or in meta/main.yml. Suggested format: + + ``` + { + 'scm': 'git', + 'src': 'http://git.example.com/repos/repo.git', + 'version': 'v1.0', + 'name': 'repo’ + } + ``` + +- Refactor the install process to encompass the following : + + - Idempotency - If a role version is already installed, don’t attempt to install it again. 
If symlinks are present (see below), don’t break or remove them. + - Provide a --force option that overrides idempotency. + - Install roles via tree-ish references, not just tags or commits (PR exists for this). + - Support a whitelist of role sources. Galaxy should not be automatically assumed to be part of the whitelist. + - Continue to be recursive, allowing roles to have dependencies specified in meta/main.yml. + - Continue to install roles in the roles_path. + - Use a symlink approach to managing role versions in the roles_path. Example: + + ``` + roles/ + briancoca.oracle_java7.v1.0 + briancoca.oracle_java7.v2.2 + briancoca.oracle_java7.qs3ih6x + briancoca.oracle_java7 => briancoca.oracle_java7.qs3ih6x + ``` + ## Conclusion -The author's preferred approach is currently Approach 1. - -Feedback is requested to improve either approach, or provide further -approaches to solve this problem. +Feedback is requested to improve any of the above approaches, or provide further approaches to solve this problem. From 9c9cce51ab1c0f1543f646d0dd81fc730fce6eaa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 24 Feb 2016 08:53:28 -0800 Subject: [PATCH 0713/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8d126bd8774..e9454fa44f5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8d126bd877444c9557b1671521516447cc557d3f +Subproject commit e9454fa44f5ff507c0dad3ed91a866854287e4dc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f6c5ed987f7..fade5b79363 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f6c5ed987f7f8ec20ad1d417b4a39ba6bbc5d7bc +Subproject commit fade5b7936342bd289e20da7413617780bb330b6 From cd51ba7965325fd5e7857e4cf2c3725b81b39352 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 24 Feb 2016 13:08:59 -0500 Subject: [PATCH 0714/1113] Use abspath instead of realpath for group/host vars files The use of realpath means when following symlinks the actual path is used when loading these files in the VariableManager, which may not line up with the host or group name specified. 
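Editor's note: the difference this patch relies on can be shown with a small self-contained demonstration (the temporary directory, group names and file names below are made up for the example and are not from the issue itself). `realpath` resolves a symlinked group_vars file to its target, while `abspath` keeps the path under the name the inventory actually referenced::

    # Stand-alone demo of realpath vs abspath on a symlinked group_vars file.
    import os
    import tempfile

    base = tempfile.mkdtemp()
    os.makedirs(os.path.join(base, 'group_vars'))

    target = os.path.join(base, 'group_vars', 'all_servers')
    open(target, 'w').close()

    link = os.path.join(base, 'group_vars', 'webservers')   # group name used in inventory
    os.symlink(target, link)

    print(os.path.realpath(link))  # .../group_vars/all_servers -> no longer matches the group name
    print(os.path.abspath(link))   # .../group_vars/webservers  -> matches the group name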
Fixes #14545 --- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 3d9ad3516d9..d10a731faaf 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -739,11 +739,11 @@ class Inventory(object): if group and host is None: # load vars in dir/group_vars/name_of_group - base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name)) + base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name)) results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader)) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host - base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name)) + base_path = os.path.abspath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name)) results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader)) # all done, results is a dictionary of variables for this particular host. From c737bd48bc4ee246c378898abb80bacdd80c0e2f Mon Sep 17 00:00:00 2001 From: Monty Taylor <mordred@inaugust.com> Date: Wed, 24 Feb 2016 12:36:50 -0600 Subject: [PATCH 0715/1113] Deduplicate true duplicate entries in the openstack inventory There are cases where the host list back from the cloud comes back duplicated. This causes us to report those with UUIDs, which we do to support truly different servers with the same name. However, in the case where duplicate host entries have the same UUID, we can know it's a data hiccup. --- contrib/inventory/openstack.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index b82a042c29e..1c7207a9e17 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -112,6 +112,14 @@ def get_host_groups(inventory, refresh=False): return groups +def append_hostvars(hostvars, groups, key, server, namegroup=False): + hostvars[key] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=namegroup): + groups[group].append(key) + + def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) firstpass = collections.defaultdict(list) @@ -130,20 +138,19 @@ def get_host_groups_from_cloud(inventory): firstpass[server['name']].append(server) for name, servers in firstpass.items(): if len(servers) == 1 and use_hostnames: - server = servers[0] - hostvars[name] = dict( - ansible_ssh_host=server['interface_ip'], - openstack=server) - for group in get_groups_from_server(server, namegroup=False): - groups[group].append(server['name']) + append_hostvars(hostvars, groups, name, servers[0]) else: + server_ids = set() + # Trap for duplicate results for server in servers: - server_id = server['id'] - hostvars[server_id] = dict( - ansible_ssh_host=server['interface_ip'], - openstack=server) - for group in get_groups_from_server(server, namegroup=True): - groups[group].append(server_id) + server_ids.add(server['id']) + if len(server_ids) == 1 and use_hostnames: + append_hostvars(hostvars, groups, name, servers[0]) + else: + for server in servers: + append_hostvars( + hostvars, groups, server['id'], servers[0], + namegroup=True) groups['_meta'] = 
{'hostvars': hostvars} return groups From 064cdec6ffca072acf18c5e72f3a0d725b07a644 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 24 Feb 2016 19:21:43 -0500 Subject: [PATCH 0716/1113] added note for verbosity --- ISSUE_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 17c0a14648b..56907da419c 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -48,7 +48,7 @@ features, show how the feature would be used. ##### Actual Results: -<!--- What actually happened? --> +<!--- What actually happened? If possible run with high verbosity (-vvvv) --> ``` <!--- Paste verbatim command output here --> From c1d90e3f2d8d866946ed48f2fca676ef0fa41fc4 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 25 Feb 2016 16:37:48 +0100 Subject: [PATCH 0717/1113] Added a section wrt. hybrid plugins and provide an example for lookup plugins --- docsite/rst/porting_guide_2.0.rst | 124 ++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index b2b6b15dd1c..6f08bd01c45 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -254,6 +254,130 @@ Connection plugins * connection plugins +Hybrid plugins +============== +In specific cases you may want a plugin that supports both ansible-1.9.x *and* +ansible-2.0. Much like porting plugins from v1 to v2, you need to understand +how plugins work in each version and support both requirements. It may mean +playing tricks on Ansible. + +Since the ansible-2.0 plugin system is more advanced, it is easier to adapt +your plugin to provide similar pieces (subclasses, methods) for ansible-1.9.x +as ansible-2.0 expects. This way your code will look a lot cleaner. + +You may find the following tips useful: + +* Check whether the ansible-2.0 class(es) are available and if they are missing + (ansible-1.9.x) mimic them with the needed methods (e.g. `__init__`) + +* When ansible-2.0 python modules are imported, and they fail (ansible-1.9.x), + catch the `ImportError` exception and perform the equivalent imports for + ansible-1.9.x. With possible translations (e.g. importing specific methods). + +* Use the existence of these methods as a qualifier to what version of Ansible + you are running. So rather than using version checks, you can do capability + checks instead. (See examples below) + +* Document for each if-then-else case for which specific version each block is + needed. This will help others to understand how they have to adapt their + plugins, but it will also help you to remove the older ansible-1.9.x support + when it is deprecated. + +* When doing plugin development, it is very useful to have the `warning()` + method during development, but it is also important to emit warnings for + deadends (cases that you expect should never be triggered) or corner cases + (e.g. cases where you expect misconfigurations). + + +Lookup plugins +-------------- +As a simple example we are going to make a hybrid `fileglob` lookup plugin. 
+The `fileglob` lookup plugin is pretty simple to understand:: + + from __future__ import (absolute_import, division, print_function) + __metaclass__ = type + + import os + import glob + + try: + # ansible-2.0 + from ansible.plugins.lookup import LookupBase + except ImportError: + # ansible-1.9.x + + class LookupBase(object): + def __init__(self, basedir=None, runner=None, **kwargs): + self.runner = runner + self.basedir = self.runner.basedir + + def get_basedir(self, variables): + return self.basedir + + try: + # ansible-1.9.x + from ansible.utils import (listify_lookup_plugin_terms, path_dwim, warning) + except ImportError: + # ansible-2.0 + from __main__ import display + warning = display.warning + + class LookupModule(LookupBase): + + # For ansible-1.9.x, we added inject=None as valid arguments + def run(self, terms, inject=None, variables=None, **kwargs): + + # ansible-2.0, but we made this work for ansible-1.9.x too ! + basedir = self.get_basedir(variables) + + # ansible-1.9.x + if 'listify_lookup_plugin_terms' in globals(): + terms = listify_lookup_plugin_terms(terms, basedir, inject) + + ret = [] + for term in terms: + term_file = os.path.basename(term) + + # For ansible-1.9.x, we imported path_dwim() from ansible.utils + if 'path_dwim' in globals(): + # ansible-1.9.x + dwimmed_path = path_dwim(basedir, os.path.dirname(term)) + else: + # ansible-2.0 + dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) + + globbed = glob.glob(os.path.join(dwimmed_path, term_file)) + ret.extend(g for g in globbed if os.path.isfile(g)) + + return ret + +Note that in the above example we did not use the `warning()` method as we +had no direct use for it in the final version. However we left this code in +so people can use this part during development/porting/use. + + + +Connection plugins +------------------ + +* connection plugins + +Action plugins +-------------- + +* action plugins + +Callback plugins +---------------- + +* callback plugins + +Connection plugins +------------------ + +* connection plugins + + Porting custom scripts ====================== From b3aa373932848930606d4cd2e34c4e325820bcca Mon Sep 17 00:00:00 2001 From: Joel <joel@deport.me> Date: Thu, 25 Feb 2016 09:52:56 -0600 Subject: [PATCH 0718/1113] Update the profile task callback plugin to include a fix for duplicate named tasks. Added additional features to adjust the number of tasks output and the sort order. 
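Editor's note: the core of the duplicate-name fix can be illustrated with a toy snippet (task names, ids and timings below are invented, and this is not the plugin code itself). Keying the timing stats dictionary by task name silently overwrites repeated tasks, whereas keying by a per-task id keeps every execution::

    # Toy example: why the stats dict is keyed by task._uuid instead of the name.
    runs = [('uuid-1', 'install packages', 2.0),
            ('uuid-2', 'install packages', 5.0)]   # two tasks that share a name

    by_name = {}
    by_uuid = {}
    for uuid, name, elapsed in runs:
        by_name[name] = elapsed                          # second entry clobbers the first
        by_uuid[uuid] = {'name': name, 'time': elapsed}  # both executions are kept

    assert len(by_name) == 1
    assert len(by_uuid) == 2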
--- lib/ansible/plugins/callback/profile_tasks.py | 57 ++++++++++++------- .../plugins/callback/profile_tasks.rst | 19 ++++--- 2 files changed, 47 insertions(+), 29 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py index e4004c97d41..644c5653a56 100644 --- a/lib/ansible/plugins/callback/profile_tasks.py +++ b/lib/ansible/plugins/callback/profile_tasks.py @@ -1,3 +1,4 @@ +# (C) 2016, Joel, http://github.com/jjshoe # (C) 2015, Tom Paine, <github@aioue.net> # (C) 2014, Jharrod LaFon, @JharrodLaFon # (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com> @@ -22,6 +23,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import collections +import os import time from ansible.plugins.callback import CallbackBase @@ -49,7 +52,7 @@ def filled(msg, fchar="*"): def timestamp(self): if self.current is not None: - self.stats[self.current] = time.time() - self.stats[self.current] + self.stats[self.current]['time'] = time.time() - self.stats[self.current]['time'] def tasktime(): @@ -72,12 +75,22 @@ class CallbackModule(CallbackBase): CALLBACK_NEEDS_WHITELIST = True def __init__(self): - self.stats = {} + self.stats = collections.OrderedDict() self.current = None + self.sort_order = os.getenv('PROFILE_TASKS_SORT_ORDER', True) + self.task_output_limit = os.getenv('PROFILE_TASKS_TASK_OUTPUT_LIMIT', 20) + + if self.sort_order == 'ascending': + self.sort_order = False; + + if self.task_output_limit == 'all': + self.task_output_limit = None + else: + self.task_output_limit = int(self.task_output_limit) super(CallbackModule, self).__init__() - def _record_task(self, name): + def _record_task(self, task): """ Logs the start of each task """ @@ -85,14 +98,14 @@ class CallbackModule(CallbackBase): timestamp(self) # Record the start time of the current task - self.current = name - self.stats[self.current] = time.time() + self.current = task._uuid + self.stats[self.current] = {'time': time.time(), 'name': task.get_name(), 'path': task.get_path()} - def playbook_on_task_start(self, name, is_conditional): - self._record_task(name) + def v2_playbook_on_task_start(self, task, is_conditional): + self._record_task(task) def v2_playbook_on_handler_task_start(self, task): - self._record_task('HANDLER: ' + task.name) + self._record_task(task) def playbook_on_setup(self): self._display.display(tasktime()) @@ -103,21 +116,25 @@ class CallbackModule(CallbackBase): timestamp(self) - # Sort the tasks by their running time - results = sorted( - self.stats.items(), - key=lambda value: value[1], - reverse=True, - ) + results = self.stats.items() - # Just keep the top 20 - results = results[:20] + # Sort the tasks by the specified sort + if self.sort_order != 'none': + results = sorted( + self.stats.iteritems(), + key=lambda x:x[1]['time'], + reverse=self.sort_order, + ) + + # Display the number of tasks specified or the default of 20 + results = results[:self.task_output_limit] # Print the timings - for name, elapsed in results: + for uuid, result in results: self._display.display( - "{0:-<70}{1:->9}".format( - '{0} '.format(name), - ' {0:.02f}s'.format(elapsed), + "{0:-<70}{1:-<70}{2:->9}".format( + '{0} '.format(result['path']), + '{0} '.format(result['name']), + ' {0:.02f}s'.format(result['time']), ) ) diff --git a/lib/ansible/plugins/callback/profile_tasks.rst b/lib/ansible/plugins/callback/profile_tasks.rst index a125d64260d..97c0685d62a 100644 --- a/lib/ansible/plugins/callback/profile_tasks.rst +++ 
b/lib/ansible/plugins/callback/profile_tasks.rst @@ -15,6 +15,11 @@ Add ``profile_tasks`` to the ``callback_whitelist`` in ``ansible.cfg``. Run playbooks as normal. +Certain options are configurable using environment variables. You can specify ``ascending`` or ``none`` for +the environment variable ``PROFILE_TASKS_SORT_ORDER`` to adjust sorting output. If you want to see more than +20 tasks in the output you can set ``PROFILE_TASKS_TASK_OUTPUT_LIMIT`` to any number, or the special value +``all`` to get a list of all tasks. + Features -------- @@ -53,15 +58,11 @@ No more wondering how old the results in a terminal window are. PLAY RECAP ******************************************************************** Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 ********* =============================================================================== - really slow task | Download project packages----------------------------11.61s - security | Really slow security policies----------------------------------7.03s - common-base | Install core system dependencies----------------------------3.62s - common | Install pip------------------------------------------------------3.60s - common | Install boto-----------------------------------------------------3.57s - nginx | Install nginx-----------------------------------------------------3.41s - serf | Install system dependencies----------------------------------------3.38s - duo_security | Install Duo Unix SSH Integration---------------------------3.37s - loggly | Install TLS version----------------------------------------------3.36s + /home/bob/ansible/roles/old_and_slow/tasks/main.yml:4 ----------------old_and_slow : install tons of packages -------------------------------- 20.03s + /home/bob/ansible/roles/db/tasks/main.yml:4 --------------------------db : second task to run ------------------------------------------------- 2.03s + None -----------------------------------------------------------------setup ------------------------------------------------------------------- 0.42s + /home/bob/ansible/roles/www/tasks/main.yml:1 -------------------------www : first task to run ------------------------------------------------- 0.03s + /home/bob/ansible/roles/fast_task.yml:1 ------------------------------fast_task : first task to run ------------------------------------------- 0.01s Compatibility ------------- From e5600ee204bdc5efa0f60786d5222de0ee21a0c7 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 25 Feb 2016 17:12:24 +0100 Subject: [PATCH 0719/1113] Turn it in correct reStructuredText Why not AsciiDoc is beyond me, as it is much easier for documentation. --- docsite/rst/porting_guide_2.0.rst | 41 +++++++++---------------------- 1 file changed, 12 insertions(+), 29 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 6f08bd01c45..e7b0f2146ea 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -256,43 +256,28 @@ Connection plugins Hybrid plugins ============== -In specific cases you may want a plugin that supports both ansible-1.9.x *and* -ansible-2.0. Much like porting plugins from v1 to v2, you need to understand -how plugins work in each version and support both requirements. It may mean -playing tricks on Ansible. +In specific cases you may want a plugin that supports both ansible-1.9.x *and* ansible-2.0. Much like porting plugins from v1 to v2, you need to understand how plugins work in each version and support both requirements. 
It may mean playing tricks on Ansible. -Since the ansible-2.0 plugin system is more advanced, it is easier to adapt -your plugin to provide similar pieces (subclasses, methods) for ansible-1.9.x -as ansible-2.0 expects. This way your code will look a lot cleaner. +Since the ansible-2.0 plugin system is more advanced, it is easier to adapt your plugin to provide similar pieces (subclasses, methods) for ansible-1.9.x as ansible-2.0 expects. This way your code will look a lot cleaner. You may find the following tips useful: -* Check whether the ansible-2.0 class(es) are available and if they are missing - (ansible-1.9.x) mimic them with the needed methods (e.g. `__init__`) +* Check whether the ansible-2.0 class(es) are available and if they are missing (ansible-1.9.x) mimic them with the needed methods (e.g. ``__init__``) -* When ansible-2.0 python modules are imported, and they fail (ansible-1.9.x), - catch the `ImportError` exception and perform the equivalent imports for - ansible-1.9.x. With possible translations (e.g. importing specific methods). +* When ansible-2.0 python modules are imported, and they fail (ansible-1.9.x), catch the ``ImportError`` exception and perform the equivalent imports for ansible-1.9.x. With possible translations (e.g. importing specific methods). -* Use the existence of these methods as a qualifier to what version of Ansible - you are running. So rather than using version checks, you can do capability - checks instead. (See examples below) +* Use the existence of these methods as a qualifier to what version of Ansible you are running. So rather than using version checks, you can do capability checks instead. (See examples below) -* Document for each if-then-else case for which specific version each block is - needed. This will help others to understand how they have to adapt their - plugins, but it will also help you to remove the older ansible-1.9.x support - when it is deprecated. +* Document for each if-then-else case for which specific version each block is needed. This will help others to understand how they have to adapt their plugins, but it will also help you to remove the older ansible-1.9.x support when it is deprecated. -* When doing plugin development, it is very useful to have the `warning()` - method during development, but it is also important to emit warnings for - deadends (cases that you expect should never be triggered) or corner cases - (e.g. cases where you expect misconfigurations). +* When doing plugin development, it is very useful to have the ``warning()`` method during development, but it is also important to emit warnings for deadends (cases that you expect should never be triggered) or corner cases (e.g. cases where you expect misconfigurations). + +* It helps to look at other plugins in ansible-1.9.x and ansible-2.0 to understand how the API works and what modules, classes and methods are available. Lookup plugins -------------- -As a simple example we are going to make a hybrid `fileglob` lookup plugin. -The `fileglob` lookup plugin is pretty simple to understand:: +As a simple example we are going to make a hybrid ``fileglob`` lookup plugin. 
The ``fileglob`` lookup plugin is pretty simple to understand:: from __future__ import (absolute_import, division, print_function) __metaclass__ = type @@ -324,7 +309,7 @@ The `fileglob` lookup plugin is pretty simple to understand:: class LookupModule(LookupBase): - # For ansible-1.9.x, we added inject=None as valid arguments + # For ansible-1.9.x, we added inject=None as valid argument def run(self, terms, inject=None, variables=None, **kwargs): # ansible-2.0, but we made this work for ansible-1.9.x too ! @@ -351,9 +336,7 @@ The `fileglob` lookup plugin is pretty simple to understand:: return ret -Note that in the above example we did not use the `warning()` method as we -had no direct use for it in the final version. However we left this code in -so people can use this part during development/porting/use. +.. Note:: In the above example we did not use the ``warning()`` method as we had no direct use for it in the final version. However we left this code in so people can use this part during development/porting/use. From 2d56293768bed49bd992a573618c8021bd415722 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 25 Feb 2016 11:25:17 -0500 Subject: [PATCH 0720/1113] default play name to hosts entry return to previous behaviour as now the name was left blank --- lib/ansible/playbook/play.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index bc033148646..00598088468 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -105,6 +105,8 @@ class Play(Base, Taggable, Become): @staticmethod def load(data, variable_manager=None, loader=None): + if ('name' not in data or data['name'] is None) and 'hosts' in data: + data['name'] = data['hosts'] p = Play() return p.load_data(data, variable_manager=variable_manager, loader=loader) From e2d2798a4238d95590777e8d41b9b28f324bd91d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 25 Feb 2016 11:29:44 -0500 Subject: [PATCH 0721/1113] hosts can be list or string --- lib/ansible/playbook/play.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 00598088468..c354b745496 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -106,7 +106,10 @@ class Play(Base, Taggable, Become): @staticmethod def load(data, variable_manager=None, loader=None): if ('name' not in data or data['name'] is None) and 'hosts' in data: - data['name'] = data['hosts'] + if isinstance(data['hosts'], list): + data['name'] = ','.join(data['hosts']) + else: + data['name'] = data['hosts'] p = Play() return p.load_data(data, variable_manager=variable_manager, loader=loader) From 771f1e31a9588129eb92b269ac94fb714780d400 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 25 Feb 2016 12:36:44 -0500 Subject: [PATCH 0722/1113] Rework the way ad-hoc filters inventory to match how cli/playbook does it --- lib/ansible/cli/adhoc.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 7a3e208f368..dcb65ad135f 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -124,13 +124,17 @@ class AdHocCLI(CLI): inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) variable_manager.set_inventory(inventory) + no_hosts = False + if len(inventory.list_hosts(pattern)) == 0: + # Empty 
inventory + display.warning("provided hosts list is empty, only localhost is available") + no_hosts = True - if self.options.subset: - inventory.subset(self.options.subset) - + inventory.subset(self.options.subset) hosts = inventory.list_hosts(pattern) - if len(hosts) == 0: - raise AnsibleError("Specified hosts options do not match any hosts") + if len(hosts) == 0 and no_hosts is False: + # Invalid limit + raise AnsibleError("Specified --limit does not match any hosts") if self.options.listhosts: display.display(' hosts (%d):' % len(hosts)) From fbdcb22e36f6aeb01e10f972b8a4e7d691b1f908 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 25 Feb 2016 16:41:50 -0500 Subject: [PATCH 0723/1113] now generate list of playbook ojbect directives TODO: needs links/info and conditionals added --- .gitignore | 1 + docsite/Makefile | 6 +++- docsite/rst/playbooks_special_topics.rst | 1 + hacking/dump_playbook_attributes.py | 33 +++++++++++++++++++ hacking/templates/playbooks_directives.rst.j2 | 19 +++++++++++ 5 files changed, 59 insertions(+), 1 deletion(-) create mode 100755 hacking/dump_playbook_attributes.py create mode 100644 hacking/templates/playbooks_directives.rst.j2 diff --git a/.gitignore b/.gitignore index 2392614453b..bbe61d075b9 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ docs/man/man3/* *.sublime-workspace # docsite stuff... docsite/rst/modules_by_category.rst +docsite/rst/playbooks_directives.rst docsite/rst/list_of_*.rst docsite/rst/*_module.rst docsite/*.html diff --git a/docsite/Makefile b/docsite/Makefile index 2b87827c597..f7f5e533271 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -1,10 +1,11 @@ #!/usr/bin/make SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") FORMATTER=../hacking/module_formatter.py +DUMPER=../hacking/dump_playbook_attributes.py all: clean docs -docs: clean modules staticmin +docs: clean directives modules staticmin ./build-site.py -(cp *.ico htmlout/) -(cp *.jpg htmlout/) @@ -41,6 +42,9 @@ clean: .PHONEY: docs clean +directives: $(FORMATTER) ../hacking/templates/rst.j2 + PYTHONPATH=../lib $(DUMPER) --template-dir=../hacking/templates --output-dir=rst/ + modules: $(FORMATTER) ../hacking/templates/rst.j2 PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst index 943f2674eb0..6593d20a54a 100644 --- a/docsite/rst/playbooks_special_topics.rst +++ b/docsite/rst/playbooks_special_topics.rst @@ -20,3 +20,4 @@ and adopt these only if they seem relevant or useful to your environment. 
playbooks_tags playbooks_vault playbooks_startnstep + playbooks_directives diff --git a/hacking/dump_playbook_attributes.py b/hacking/dump_playbook_attributes.py new file mode 100755 index 00000000000..3fa2b410846 --- /dev/null +++ b/hacking/dump_playbook_attributes.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python2 + +import optparse +from jinja2 import Environment, FileSystemLoader + +from ansible.playbook import Play +from ansible.playbook.block import Block +from ansible.playbook.role import Role +from ansible.playbook.task import Task + +template_file = 'playbooks_directives.rst.j2' +oblist = {} +for aclass in Play, Block, Role, Task: + aobj = aclass() + oblist[type(aobj).__name__] = aobj + +p = optparse.OptionParser( + version='%prog 1.0', + usage='usage: %prog [options]', + description='Generate module documentation from metadata', +) +p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates") +p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files") + +(options, args) = p.parse_args() + +env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,) +template = env.get_template(template_file) +outputname = options.output_dir + template_file.replace('.j2','') +tempvars = { 'oblist': oblist } + +with open( outputname, 'w') as f: + f.write(template.render(tempvars)) diff --git a/hacking/templates/playbooks_directives.rst.j2 b/hacking/templates/playbooks_directives.rst.j2 new file mode 100644 index 00000000000..0dc9408e430 --- /dev/null +++ b/hacking/templates/playbooks_directives.rst.j2 @@ -0,0 +1,19 @@ +Directives Glossary +=================== + +Here we list the common playbook objects and the possible directives that can be used with them. +Note that not all directives affect the object itself and might just be there to be inherited by other contained objects. + +.. 
contents:: + :local: + :depth: 1 + +{% for name in oblist %} + +{{ name }} +{{ '-' * name|length }} +{% for attribute in oblist[name].__dict__['_attributes']|sort %} + * {{ attribute }} +{% endfor %} + +{% endfor %} From ad5a6e7993cf1c384b75a30812215ee22dfcedd1 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 25 Feb 2016 17:23:20 -0500 Subject: [PATCH 0724/1113] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e9454fa44f5..132b8e8f511 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e9454fa44f5ff507c0dad3ed91a866854287e4dc +Subproject commit 132b8e8f511d5ce7af01cf82254182406c56aecd diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index fade5b79363..479f99678b2 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit fade5b7936342bd289e20da7413617780bb330b6 +Subproject commit 479f99678b267b9c42c9c76504b9c528400eaf70 From c4ecbad663ca6235579407d8e5c854405eeaa039 Mon Sep 17 00:00:00 2001 From: Gabriel Burkholder <skinlayers@skinlayers.net> Date: Thu, 25 Feb 2016 14:21:28 -0800 Subject: [PATCH 0725/1113] Cleans up extra whitespace in ansible.cfg --- examples/ansible.cfg | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 9c5b3bedc1f..c9dc7592a40 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -1,7 +1,7 @@ # config file for ansible -- http://ansible.com/ # ============================================== -# nearly all parameters can be overridden in ansible-playbook +# nearly all parameters can be overridden in ansible-playbook # or with command line flags. ansible will read ANSIBLE_CONFIG, # ansible.cfg in the current working directory, .ansible.cfg in # the home directory or /etc/ansible/ansible.cfg, whichever it @@ -81,7 +81,7 @@ # list any Jinja2 extensions to enable here: #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n -# if set, always use this private key file for authentication, same as +# if set, always use this private key file for authentication, same as # if passing --private-key to ansible or ansible-playbook #private_key_file = /path/to/file @@ -93,8 +93,8 @@ #ansible_managed = Ansible managed: {file} on {host} # by default, ansible-playbook will display "Skipping [host]" if it determines a task -# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" -# messages. NOTE: the task header will still be shown regardless of whether or not the +# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" +# messages. NOTE: the task header will still be shown regardless of whether or not the # task is skipped. #display_skipped_hosts = True @@ -108,7 +108,7 @@ # safely set this to True to get more informative messages. #display_args_to_stdout = False -# by default (as of 1.3), Ansible will raise errors when attempting to dereference +# by default (as of 1.3), Ansible will raise errors when attempting to dereference # Jinja2 variables that are not set in templates or action lines. Uncomment this line # to revert the behavior to pre-1.3. 
#error_on_undefined_vars = False @@ -127,7 +127,7 @@ # (as of 1.8), Ansible can optionally warn when usage of the shell and # command module appear to be simplified by using a default Ansible module # instead. These warnings can be silenced by adjusting the following -# setting or adding warn=yes or warn=no to the end of the command line +# setting or adding warn=yes or warn=no to the end of the command line # parameter string. This will for example suggest using the git module # instead of shelling out to the git command. # command_warnings = False @@ -143,13 +143,13 @@ #test_plugins = /usr/share/ansible/plugins/test # by default callbacks are not loaded for /bin/ansible, enable this if you -# want, for example, a notification or logging callback to also apply to +# want, for example, a notification or logging callback to also apply to # /bin/ansible runs #bin_ansible_callbacks = False # don't like cows? that's unfortunate. -# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 +# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 #nocows = 1 # set which cowsay stencil you'd like to use by default. When set to 'random', @@ -218,32 +218,32 @@ [ssh_connection] # ssh arguments to use -# Leaving off ControlPersist will result in poor performance, so use +# Leaving off ControlPersist will result in poor performance, so use # paramiko on older platforms rather than removing it #ssh_args = -o ControlMaster=auto -o ControlPersist=60s # The path to use for the ControlPath sockets. This defaults to # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with -# very long hostnames or very long path names (caused by long user names or +# very long hostnames or very long path names (caused by long user names or # deeply nested home directories) this can exceed the character limit on -# file socket names (108 characters for most platforms). In that case, you +# file socket names (108 characters for most platforms). In that case, you # may wish to shorten the string below. -# -# Example: +# +# Example: # control_path = %(directory)s/%%h-%%r #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r -# Enabling pipelining reduces the number of SSH operations required to -# execute a module on the remote server. This can result in a significant -# performance improvement when enabled, however when using "sudo:" you must +# Enabling pipelining reduces the number of SSH operations required to +# execute a module on the remote server. This can result in a significant +# performance improvement when enabled, however when using "sudo:" you must # first disable 'requiretty' in /etc/sudoers # # By default, this option is disabled to preserve compatibility with # sudoers configurations that have requiretty (the default on many distros). -# +# #pipelining = False -# if True, make ansible use scp if the connection type is ssh +# if True, make ansible use scp if the connection type is ssh # (default is sftp) #scp_if_ssh = True @@ -259,7 +259,7 @@ # The daemon timeout is measured in minutes. This time is measured # from the last activity to the accelerate daemon. 
-#accelerate_daemon_timeout = 30 +#accelerate_daemon_timeout = 30 # If set to yes, accelerate_multi_key will allow multiple # private keys to be uploaded to it, though each user must From 3e28ee0fd473d8ebff115680e3d844b0426808d1 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 25 Feb 2016 19:58:27 -0500 Subject: [PATCH 0726/1113] avoid printing internal loop and loop_args --- hacking/templates/playbooks_directives.rst.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hacking/templates/playbooks_directives.rst.j2 b/hacking/templates/playbooks_directives.rst.j2 index 0dc9408e430..6f0528cefed 100644 --- a/hacking/templates/playbooks_directives.rst.j2 +++ b/hacking/templates/playbooks_directives.rst.j2 @@ -13,7 +13,9 @@ Note that not all directives affect the object itself and might just be there to {{ name }} {{ '-' * name|length }} {% for attribute in oblist[name].__dict__['_attributes']|sort %} +{% if attribute not in ['loop', 'loop_args'] %} * {{ attribute }} +{% endif %} {% endfor %} {% endfor %} From 0eca47cf91adc487af5250039381f9b468bbb258 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Fri, 26 Feb 2016 15:03:51 +0100 Subject: [PATCH 0727/1113] Avoid merging a dict and a AnsibleUnicode This is the same fix we applied to v1.9 in PR #14565, however it does not fix #14678 completely ! The dictionaries are not being merged as tey are on v1.9. --- lib/ansible/utils/vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py index 73ba52b4b39..01eb5ac8212 100644 --- a/lib/ansible/utils/vars.py +++ b/lib/ansible/utils/vars.py @@ -86,7 +86,7 @@ def merge_hash(a, b): for k, v in iteritems(b): # if there's already such key in a # and that key contains a MutableMapping - if k in result and isinstance(result[k], MutableMapping): + if k in result and isinstance(result[k], MutableMapping) and isinstance(v, MutableMapping): # merge those dicts recursively result[k] = merge_hash(result[k], v) else: From 3518a05db6ea2616888dba02863f51b395616647 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 26 Feb 2016 12:47:59 -0500 Subject: [PATCH 0728/1113] Starting to expand unit tests for ActionBase plugin class --- test/units/plugins/action/test_action.py | 181 +++++++++++++++++++++-- 1 file changed, 172 insertions(+), 9 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 401d1363e3e..89c59a9b4fd 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -19,30 +19,193 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import ast +import json +import pipes +from sys import version_info +if version_info.major == 2: + import __builtin__ as builtins +else: + import builtins + +from ansible import __version__ as ansible_version from ansible import constants as C +from ansible.compat.six import text_type from ansible.compat.tests import unittest -from ansible.compat.tests.mock import Mock +from ansible.compat.tests.mock import patch, MagicMock, mock_open +from ansible.errors import AnsibleError from ansible.playbook.play_context import PlayContext +from ansible.plugins import PluginLoader from ansible.plugins.action import ActionBase +from ansible.template import Templar +from units.mock.loader import DictDataLoader + +python_module_replacers = """ +#!/usr/bin/python + +#ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>" +#MODULE_ARGS = 
"<<INCLUDE_ANSIBLE_MODULE_ARGS>>" +#MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>" +#SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>" + +from ansible.module_utils.basic import * +""" + +powershell_module_replacers = """ +WINDOWS_ARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>" +# POWERSHELL_COMMON +""" + +class DerivedActionBase(ActionBase): + def run(self, tmp=None, task_vars=None): + # We're not testing the plugin run() method, just the helper + # methods ActionBase defines + return super(DerivedActionBase, self).run(tmp=tmp, task_vars=task_vars) class TestActionBase(unittest.TestCase): - class DerivedActionBase(ActionBase): - def run(self, tmp=None, task_vars=None): - # We're not testing the plugin run() method, just the helper - # methods ActionBase defines - return dict() + def test_action_base_run(self): + mock_task = MagicMock() + mock_task.action = "foo" + mock_task.args = dict(a=1, b=2, c=3) + + mock_connection = MagicMock() + + play_context = PlayContext() + + mock_task.async = None + action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None) + results = action_base.run() + self.assertEqual(results, dict()) + + mock_task.async = 0 + action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None) + results = action_base.run() + self.assertEqual(results, dict(invocation=dict(module_name='foo', module_args=dict(a=1, b=2, c=3)))) + + def test_action_base__configure_module(self): + fake_loader = DictDataLoader({ + }) + + # create our fake task + mock_task = MagicMock() + mock_task.action = "copy" + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # create a mock shared loader object + def mock_find_plugin(name, options): + if name == 'badmodule': + return None + elif '.ps1' in options: + return '/fake/path/to/%s.ps1' % name + else: + return '/fake/path/to/%s' % name + + mock_module_loader = MagicMock() + mock_module_loader.find_plugin.side_effect = mock_find_plugin + mock_shared_obj_loader = MagicMock() + mock_shared_obj_loader.module_loader = mock_module_loader + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=fake_loader, + templar=None, + shared_loader_obj=mock_shared_obj_loader, + ) + + # test python module formatting + with patch.object(builtins, 'open', mock_open(read_data=python_module_replacers.strip())) as m: + mock_task.args = dict(a=1) + mock_connection.module_implementation_preferences = ('',) + (style, shebang, data) = action_base._configure_module(mock_task.action, mock_task.args) + self.assertEqual(style, "new") + self.assertEqual(shebang, "#!/usr/bin/python") + + # test module not found + self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args) + + # test powershell module formatting + with patch.object(builtins, 'open', mock_open(read_data=powershell_module_replacers.strip())) as m: + mock_task.action = 'win_copy' + mock_task.args = dict(b=2) + mock_connection.module_implementation_preferences = ('.ps1',) + (style, shebang, data) = action_base._configure_module('stat', mock_task.args) + self.assertEqual(style, "new") + self.assertEqual(shebang, None) + + # test module not found + self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args) + + def 
test_action_base__compute_environment_string(self): + fake_loader = DictDataLoader({ + }) + + # create our fake task + mock_task = MagicMock() + mock_task.action = "copy" + mock_task.args = dict(a=1) + + # create a mock connection, so we don't actually try and connect to things + def env_prefix(**args): + return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in args.items()]) + mock_connection = MagicMock() + mock_connection._shell.env_prefix.side_effect = env_prefix + + # we're using a real play context here + play_context = PlayContext() + + # and we're using a real templar here too + templar = Templar(loader=fake_loader) + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=fake_loader, + templar=templar, + shared_loader_obj=None, + ) + + # test standard environment setup + mock_task.environment = [dict(FOO='foo'), None] + env_string = action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=foo") + + # test where environment is not a list + mock_task.environment = dict(FOO='foo') + env_string = action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=foo") + + # test environment with a variable in it + templar.set_available_variables(variables=dict(the_var='bar')) + mock_task.environment = [dict(FOO='{{the_var}}')] + env_string = action_base._compute_environment_string() + self.assertEqual(env_string, "FOO=bar") + + # test with a bad environment set + mock_task.environment = dict(FOO='foo') + mock_task.environment = ['hi there'] + self.assertRaises(AnsibleError, action_base._compute_environment_string) def test_sudo_only_if_user_differs(self): play_context = PlayContext() - action_base = self.DerivedActionBase(None, None, play_context, None, None, None) - action_base._connection = Mock(exec_command=Mock(return_value=(0, '', ''))) + action_base = DerivedActionBase(None, None, play_context, None, None, None) + action_base._connection = MagicMock(exec_command=MagicMock(return_value=(0, '', ''))) play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value='CMD') + play_context.make_become_cmd = MagicMock(return_value='CMD') action_base._low_level_execute_command('ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() From ef8bec18bf508da9307783068873abf9b21db713 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 08:59:49 -0800 Subject: [PATCH 0729/1113] Use a unicode format string so that we don't convert to byte strings Fixes #14349 --- lib/ansible/executor/playbook_executor.py | 3 ++- lib/ansible/parsing/yaml/constructor.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index ce91b7f6025..31d62984508 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -27,6 +27,7 @@ from ansible import constants as C from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook from ansible.template import Templar +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -81,7 +82,7 @@ class PlaybookExecutor: i = 1 plays = pb.get_plays() - display.vv('%d plays in %s' % (len(plays), playbook_path)) + display.vv(u'%d plays in %s' % (len(plays), to_unicode(playbook_path))) for play in plays: if 
play._included_path is not None: diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py index 164d23b497e..6c984ad0802 100644 --- a/lib/ansible/parsing/yaml/constructor.py +++ b/lib/ansible/parsing/yaml/constructor.py @@ -66,7 +66,7 @@ class AnsibleConstructor(Constructor): "found unacceptable key (%s)" % exc, key_node.start_mark) if key in mapping: - display.warning('While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}). Using last defined value only.'.format(key, *mapping.ansible_pos)) + display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}). Using last defined value only.'.format(key, *mapping.ansible_pos)) value = self.construct_object(value_node, deep=deep) mapping[key] = value From 1f2595306a14807500d969bc91d53eeb83b56087 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 10:26:49 -0800 Subject: [PATCH 0730/1113] normalize path components to unicode before combining or operating on them Note that this will break if we deal with non-utf8 paths. Fixing this way because converting everythig to byte strings instead is a very invasive task so it should be done as a specific feature to provide support for non-utf8 paths at some point in the future (if needed). --- lib/ansible/parsing/dataloader.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index b295560c931..f40d72450c2 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -199,13 +199,15 @@ class DataLoader(): ''' given = unquote(given) + given = to_unicode(given, errors='strict') - if given.startswith("/"): + if given.startswith(u"/"): return os.path.abspath(given) - elif given.startswith("~"): + elif given.startswith(u"~"): return os.path.abspath(os.path.expanduser(given)) else: - return os.path.abspath(os.path.join(self._basedir, given)) + basedir = to_unicode(self._basedir, errors='strict') + return os.path.abspath(os.path.join(basedir, given)) def path_dwim_relative(self, path, dirname, source): ''' From 528f073bfa7e53cca9285195ed30d6cfb6142bd5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 26 Feb 2016 13:51:23 -0500 Subject: [PATCH 0731/1113] update core subref --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 132b8e8f511..45367c3d090 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 132b8e8f511d5ce7af01cf82254182406c56aecd +Subproject commit 45367c3d090ccf4d649b103b50b6eec939b6ee14 From 512d3dd62108509e1831c431b300d3955476c82c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 26 Feb 2016 14:56:54 -0500 Subject: [PATCH 0732/1113] with_ loops always should want a list --- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index d9cb7170b33..94ad6786c02 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -188,7 +188,7 @@ class TaskExecutor: except AnsibleUndefinedVariable as e: loop_terms = [] display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e)) - items = 
self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars) + items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar).run(terms=loop_terms, variables=self._job_vars, wantlist=True) else: raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop) From a3489408a5e1bdbcd90996ec6f7a31c1c4490b31 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 26 Feb 2016 16:18:55 -0500 Subject: [PATCH 0733/1113] fixes to playbooks_directives generation order is now predictable now correctly substitutes loop for with_ adds local_action to action --- hacking/dump_playbook_attributes.py | 25 ++++++++++++++++--- hacking/templates/playbooks_directives.rst.j2 | 9 +++---- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/hacking/dump_playbook_attributes.py b/hacking/dump_playbook_attributes.py index 3fa2b410846..89f5273f3c3 100755 --- a/hacking/dump_playbook_attributes.py +++ b/hacking/dump_playbook_attributes.py @@ -10,9 +10,8 @@ from ansible.playbook.task import Task template_file = 'playbooks_directives.rst.j2' oblist = {} -for aclass in Play, Block, Role, Task: - aobj = aclass() - oblist[type(aobj).__name__] = aobj +clist = [] +class_list = [ Play, Role, Block, Task ] p = optparse.OptionParser( version='%prog 1.0', @@ -24,10 +23,28 @@ p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/ (options, args) = p.parse_args() +for aclass in class_list + aobj = aclass() + name = type(aobj).__name__ + + # build ordered list to loop over and dict with attributes + clist.append(name) + oblist[name] = aobj.__dict__['_attributes'] + + # loop is really with_ for users + if 'loop' in oblist[name]: + oblist[name]['with_<lookup_plugin>'] = True + del oblist[name]['loop'] + del oblist[name]['loop_args'] + + # local_action is implicit with action + if 'action' in oblist[name]: + oblist[name]['local_action'] = True + env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,) template = env.get_template(template_file) outputname = options.output_dir + template_file.replace('.j2','') -tempvars = { 'oblist': oblist } +tempvars = { 'oblist': oblist, 'clist': clist } with open( outputname, 'w') as f: f.write(template.render(tempvars)) diff --git a/hacking/templates/playbooks_directives.rst.j2 b/hacking/templates/playbooks_directives.rst.j2 index 6f0528cefed..e54d0d455d4 100644 --- a/hacking/templates/playbooks_directives.rst.j2 +++ b/hacking/templates/playbooks_directives.rst.j2 @@ -1,21 +1,20 @@ Directives Glossary =================== -Here we list the common playbook objects and the possible directives that can be used with them. +Here we list the common playbook objects and the their directives. Note that not all directives affect the object itself and might just be there to be inherited by other contained objects. +Aliases for the directives are not reflected here, nor are mutable ones, for example `action` in task can be substituted by the name of any module plugin. .. 
contents:: :local: :depth: 1 -{% for name in oblist %} +{% for name in clist %} {{ name }} {{ '-' * name|length }} -{% for attribute in oblist[name].__dict__['_attributes']|sort %} -{% if attribute not in ['loop', 'loop_args'] %} +{% for attribute in oblist[name]|sort %} * {{ attribute }} -{% endif %} {% endfor %} {% endfor %} From cbc797a7ed14bc29b09730b83a5a22444fdb4bb3 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 26 Feb 2016 16:27:10 -0500 Subject: [PATCH 0734/1113] added missing : --- hacking/dump_playbook_attributes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/dump_playbook_attributes.py b/hacking/dump_playbook_attributes.py index 89f5273f3c3..6a0f08f45bc 100755 --- a/hacking/dump_playbook_attributes.py +++ b/hacking/dump_playbook_attributes.py @@ -23,7 +23,7 @@ p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/ (options, args) = p.parse_args() -for aclass in class_list +for aclass in class_list: aobj = aclass() name = type(aobj).__name__ From 01d835700ba09c88209988a8b01a4125244eed11 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 26 Feb 2016 16:21:42 -0500 Subject: [PATCH 0735/1113] Cleaning up some py version problems with ActionBase unit tests --- test/units/plugins/action/test_action.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 89c59a9b4fd..ea884437f23 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -24,7 +24,7 @@ import json import pipes from sys import version_info -if version_info.major == 2: +if version_info[0] == 2: import __builtin__ as builtins else: import builtins @@ -124,7 +124,7 @@ class TestActionBase(unittest.TestCase): ) # test python module formatting - with patch.object(builtins, 'open', mock_open(read_data=python_module_replacers.strip())) as m: + with patch.object(builtins, 'open', mock_open(read_data=text_type(python_module_replacers.strip()))) as m: mock_task.args = dict(a=1) mock_connection.module_implementation_preferences = ('',) (style, shebang, data) = action_base._configure_module(mock_task.action, mock_task.args) @@ -135,7 +135,7 @@ class TestActionBase(unittest.TestCase): self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args) # test powershell module formatting - with patch.object(builtins, 'open', mock_open(read_data=powershell_module_replacers.strip())) as m: + with patch.object(builtins, 'open', mock_open(read_data=text_type(powershell_module_replacers.strip()))) as m: mock_task.action = 'win_copy' mock_task.args = dict(b=2) mock_connection.module_implementation_preferences = ('.ps1',) From b0bed272116a4512641388b7bf4c57f514f11c81 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 26 Feb 2016 16:43:22 -0500 Subject: [PATCH 0736/1113] minor doc fixes --- docsite/rst/faq.rst | 6 ++++-- docsite/rst/intro_configuration.rst | 4 ++-- docsite/rst/playbooks_advanced_syntax.rst | 4 ++-- docsite/rst/playbooks_variables.rst | 2 +- docsite/rst/porting_guide_2.0.rst | 1 + 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index a4b73b7b800..727f4fb1501 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -174,7 +174,9 @@ How do I loop over a list of hosts in a group, inside of a template? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration -file with a list of servers. To do this, you can just access the "$groups" dictionary in your template, like this:: +file with a list of servers. To do this, you can just access the "$groups" dictionary in your template, like this: + +.. code-block:: jinja2 {% for host in groups['db_servers'] %} {{ host }} @@ -184,7 +186,7 @@ If you need to access facts about these hosts, for instance, the IP address of e - hosts: db_servers tasks: - - # doesn't matter what you do, just that they were talked to previously. + - debug: msg="doesn't matter what you do, just that they were talked to previously." Then you can use the facts inside your template, like this:: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index a42d7be73cb..4272ef7fb9b 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -228,7 +228,7 @@ Allows disabling of deprecating warnings in ansible-playbook output:: Deprecation warnings indicate usage of legacy features that are slated for removal in a future release of Ansible. -.. _display_args_to_stdout +.. _display_args_to_stdout: display_args_to_stdout ====================== @@ -768,7 +768,7 @@ instead. Setting it to False will improve performance and is recommended when h record_host_keys=True -.. _paramiko_proxy_command +.. _paramiko_proxy_command: proxy_command ============= diff --git a/docsite/rst/playbooks_advanced_syntax.rst b/docsite/rst/playbooks_advanced_syntax.rst index 932cfc87b96..930f9239527 100644 --- a/docsite/rst/playbooks_advanced_syntax.rst +++ b/docsite/rst/playbooks_advanced_syntax.rst @@ -5,14 +5,14 @@ Advanced Syntax This page describes advanced YAML syntax that enables you to have more control over the data placed in YAML files used by Ansible. -.. _yaml_tags_and_python_types +.. _yaml_tags_and_python_types: YAML tags and Python types `````````````````````````` The documentation covered here is an extension of the documentation that can be found in the `PyYAML Documentation <http://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_ -.. _unsafe_strings +.. _unsafe_strings: Unsafe or Raw Strings ~~~~~~~~~~~~~~~~~~~~~ diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 07b71c64fec..22be4abeef2 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -936,7 +936,7 @@ how all of these things can work together. Advanced Syntax ``````````````` -For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see `playbooks_advanced_syntax`_ +For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see `:doc:playbooks_advanced_syntax` .. 
seealso:: diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index e7b0f2146ea..8c95c9a4d06 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -163,6 +163,7 @@ Here are some corner cases encountered when updating, these are mostly caused by - task: dostuf becom: yes + The task always ran without using privilege escalation (for that you need `become`) but was also silently ignored so the play 'ran' even though it should not, now this is a parsing error. From c29f51804b5c9e325b72cc7bb288a1363c712d99 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 16:41:13 -0800 Subject: [PATCH 0737/1113] Fix mixing of bytes and str in module replacer (caused traceback on python3) --- lib/ansible/executor/module_common.py | 67 ++++++++--------- test/units/plugins/action/test_action.py | 94 ++++++++++++++++++++++-- 2 files changed, 123 insertions(+), 38 deletions(-) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index f0e307cba0b..e6fa2148b8d 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -21,7 +21,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type # from python and deps -from ansible.compat.six.moves import StringIO +from io import BytesIO import json import os import shlex @@ -30,20 +30,20 @@ import shlex from ansible import __version__ from ansible import constants as C from ansible.errors import AnsibleError -from ansible.utils.unicode import to_bytes +from ansible.utils.unicode import to_bytes, to_unicode -REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" -REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\"" -REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\"" -REPLACER_WINDOWS = "# POWERSHELL_COMMON" -REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>" -REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>" -REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\"" -REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>" +REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" +REPLACER_ARGS = b"\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\"" +REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\"" +REPLACER_WINDOWS = b"# POWERSHELL_COMMON" +REPLACER_WINARGS = b"<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>" +REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>" +REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\"" +REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>" # We could end up writing out parameters with unicode characters so we need to # specify an encoding for the python source file -ENCODING_STRING = '# -*- coding: utf-8 -*-' +ENCODING_STRING = b'# -*- coding: utf-8 -*-' # we've moved the module_common relative to the snippets, so fix the path _SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils') @@ -53,7 +53,7 @@ _SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils') def _slurp(path): if not os.path.exists(path): raise AnsibleError("imported module support code does not exist at %s" % path) - fd = open(path) + fd = open(path, 'rb') data = fd.read() fd.close() return data @@ -71,49 +71,49 @@ def _find_snippet_imports(module_data, module_path, strip_comments): module_style = 'new' elif REPLACER_JSONARGS in module_data: module_style = 'new' - elif 'from ansible.module_utils.' in module_data: + elif b'from ansible.module_utils.' 
in module_data: module_style = 'new' - elif 'WANT_JSON' in module_data: + elif b'WANT_JSON' in module_data: module_style = 'non_native_want_json' - output = StringIO() - lines = module_data.split('\n') + output = BytesIO() + lines = module_data.split(b'\n') snippet_names = [] for line in lines: if REPLACER in line: output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py"))) - snippet_names.append('basic') + snippet_names.append(b'basic') if REPLACER_WINDOWS in line: ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1")) output.write(ps_data) - snippet_names.append('powershell') - elif line.startswith('from ansible.module_utils.'): - tokens=line.split(".") + snippet_names.append(b'powershell') + elif line.startswith(b'from ansible.module_utils.'): + tokens=line.split(b".") import_error = False if len(tokens) != 3: import_error = True - if " import *" not in line: + if b" import *" not in line: import_error = True if import_error: raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.<lib name> import *'" % module_path) snippet_name = tokens[2].split()[0] snippet_names.append(snippet_name) - output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py"))) + output.write(_slurp(os.path.join(_SNIPPET_PATH, to_unicode(snippet_name) + ".py"))) else: - if strip_comments and line.startswith("#") or line == '': + if strip_comments and line.startswith(b"#") or line == b'': pass output.write(line) - output.write("\n") + output.write(b"\n") if not module_path.endswith(".ps1"): # Unixy modules - if len(snippet_names) > 0 and not 'basic' in snippet_names: + if len(snippet_names) > 0 and not b'basic' in snippet_names: raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path) else: # Windows modules - if len(snippet_names) > 0 and not 'powershell' in snippet_names: + if len(snippet_names) > 0 and not b'powershell' in snippet_names: raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path) return (output.getvalue(), module_style) @@ -158,28 +158,28 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal # * Cache the modified module? If only the args are different and we do # that as the last step we could cache all the work up to that point. 
- with open(module_path) as f: + with open(module_path, 'rb') as f: # read in the module source module_data = f.read() (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments) - module_args_json = json.dumps(module_args).encode('utf-8') - python_repred_args = repr(module_args_json) + module_args_json = to_bytes(json.dumps(module_args)) + python_repred_args = to_bytes(repr(module_args_json)) # these strings should be part of the 'basic' snippet which is required to be included - module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) + module_data = module_data.replace(REPLACER_VERSION, to_bytes(__version__, nonstring='repr')) module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args) module_data = module_data.replace(REPLACER_WINARGS, module_args_json) module_data = module_data.replace(REPLACER_JSONARGS, module_args_json) - module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS)) + module_data = module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS))) if module_style == 'new': facility = C.DEFAULT_SYSLOG_FACILITY if 'ansible_syslog_facility' in task_vars: facility = task_vars['ansible_syslog_facility'] - module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) + module_data = module_data.replace(b'syslog.LOG_USER', to_bytes("syslog.%s" % facility)) lines = module_data.split(b"\n", 1) shebang = None @@ -188,12 +188,13 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal args = shlex.split(str(shebang[2:])) interpreter = args[0] interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) + interpreter = to_bytes(interpreter) if interpreter_config in task_vars: interpreter = to_bytes(task_vars[interpreter_config], errors='strict') lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:])) - if os.path.basename(interpreter).startswith('python'): + if os.path.basename(interpreter).startswith(b'python'): lines.insert(1, ENCODING_STRING) else: # No shebang, assume a binary module? 
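As a side note on the bytes conversion above: a minimal standalone sketch (hypothetical helper name, not the real `modify_module`/`_find_snippet_imports` code) of why every replacer constant and substitution value has to be bytes once the module file is opened in `'rb'` mode:

```python
# Simplified illustration only -- assumes nothing about Ansible's real API.
# On Python 3, a file opened with 'rb' yields bytes, and bytes.replace()
# rejects str arguments, so both the marker and the replacement must be bytes.
import json

REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""

def splice_args(module_source, module_args):
    """Return module_source (bytes) with the args marker replaced."""
    args_json = json.dumps(module_args).encode('utf-8')   # text -> bytes
    return module_source.replace(REPLACER_COMPLEX, repr(args_json).encode('utf-8'))

if __name__ == '__main__':
    source = b'MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"\n'
    print(splice_args(source, {'state': 'present'}))
```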
diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index ea884437f23..57aa38c20a9 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # (c) 2015, Florian Apolloner <florian@apolloner.eu> # # This file is part of Ansible @@ -34,15 +35,17 @@ from ansible import constants as C from ansible.compat.six import text_type from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock, mock_open + from ansible.errors import AnsibleError from ansible.playbook.play_context import PlayContext from ansible.plugins import PluginLoader from ansible.plugins.action import ActionBase from ansible.template import Templar +from ansible.utils.unicode import to_bytes from units.mock.loader import DictDataLoader -python_module_replacers = """ +python_module_replacers = b""" #!/usr/bin/python #ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>" @@ -50,14 +53,95 @@ python_module_replacers = """ #MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>" #SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>" +test = u'Toshio \u304f\u3089\u3068\u307f' from ansible.module_utils.basic import * """ -powershell_module_replacers = """ +powershell_module_replacers = b""" WINDOWS_ARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>" # POWERSHELL_COMMON """ +# Prior to 3.4.4, mock_open cannot handle binary read_data +if version_info >= (3,) and version_info < (3, 4, 4): + file_spec = None + + def _iterate_read_data(read_data): + # Helper for mock_open: + # Retrieve lines from read_data via a generator so that separate calls to + # readline, read, and readlines are properly interleaved + sep = b'\n' if isinstance(read_data, bytes) else '\n' + data_as_list = [l + sep for l in read_data.split(sep)] + + if data_as_list[-1] == sep: + # If the last line ended in a newline, the list comprehension will have an + # extra entry that's just a newline. Remove this. + data_as_list = data_as_list[:-1] + else: + # If there wasn't an extra newline by itself, then the file being + # emulated doesn't have a newline to end the last line remove the + # newline that our naive format() added + data_as_list[-1] = data_as_list[-1][:-1] + + for line in data_as_list: + yield line + + def mock_open(mock=None, read_data=''): + """ + A helper function to create a mock to replace the use of `open`. It works + for `open` called directly or used as a context manager. + + The `mock` argument is the mock object to configure. If `None` (the + default) then a `MagicMock` will be created for you, with the API limited + to methods or attributes available on standard file handles. + + `read_data` is a string for the `read` methoddline`, and `readlines` of the + file handle to return. This is an empty string by default. 
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_data) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_data) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _data: + yield line + + + global file_spec + if file_spec is None: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _data = _iterate_read_data(read_data) + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + handle.readline.side_effect = _readline_side_effect() + handle.readlines.side_effect = _readlines_side_effect + + mock.return_value = handle + return mock + + class DerivedActionBase(ActionBase): def run(self, tmp=None, task_vars=None): # We're not testing the plugin run() method, just the helper @@ -124,18 +208,18 @@ class TestActionBase(unittest.TestCase): ) # test python module formatting - with patch.object(builtins, 'open', mock_open(read_data=text_type(python_module_replacers.strip()))) as m: + with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))) as m: mock_task.args = dict(a=1) mock_connection.module_implementation_preferences = ('',) (style, shebang, data) = action_base._configure_module(mock_task.action, mock_task.args) self.assertEqual(style, "new") - self.assertEqual(shebang, "#!/usr/bin/python") + self.assertEqual(shebang, b"#!/usr/bin/python") # test module not found self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args) # test powershell module formatting - with patch.object(builtins, 'open', mock_open(read_data=text_type(powershell_module_replacers.strip()))) as m: + with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))) as m: mock_task.action = 'win_copy' mock_task.args = dict(b=2) mock_connection.module_implementation_preferences = ('.ps1',) From b70bf3b0568159f5a828227baf1d4638a683374c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 16:42:18 -0800 Subject: [PATCH 0738/1113] Use io.StringIO and io.BytesIO instead of StringIO.StringIO for compat with py3 --- lib/ansible/module_utils/facts.py | 7 +++++- lib/ansible/module_utils/shell.py | 6 ++++- lib/ansible/plugins/lookup/ini.py | 6 ++--- lib/ansible/template/__init__.py | 3 ++- .../module_utils/basic/test_exit_json.py | 8 +++--- .../module_utils/basic/test_run_command.py | 8 +++--- test/units/parsing/yaml/test_loader.py | 25 ++++++++++--------- .../plugins/connections/test_connection.py | 2 +- 8 files changed, 38 insertions(+), 27 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index e698d780ffb..23007e1689e 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -34,7 +34,12 @@ import datetime import getpass import pwd import ConfigParser -import StringIO + +# py2 vs py3; replace with six via ziploader +try: + from StringIO import StringIO 
+except ImportError: + from io import StringIO from string import maketrans diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index b9e798603c7..bcb88fa790b 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -19,7 +19,11 @@ import re import socket -from StringIO import StringIO +# py2 vs py3; replace with six via ziploader +try: + from StringIO import StringIO +except ImportError: + from io import StringIO try: import paramiko diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py index 3adbd2c7d67..0e68816dc32 100644 --- a/lib/ansible/plugins/lookup/ini.py +++ b/lib/ansible/plugins/lookup/ini.py @@ -17,7 +17,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import StringIO +from io import StringIO import os import ConfigParser import re @@ -28,8 +28,8 @@ from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def read_properties(self, filename, key, dflt, is_regexp): - config = StringIO.StringIO() - config.write('[java_properties]\n' + open(filename).read()) + config = StringIO() + config.write(u'[java_properties]\n' + open(filename).read()) config.seek(0, os.SEEK_SET) self.cp.readfp(config) return self.get_value(key, 'java_properties', dflt, is_regexp) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index edb34bb4bed..a7a8ac4a37c 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -23,8 +23,9 @@ import ast import contextlib import os import re +from io import StringIO -from ansible.compat.six import string_types, text_type, binary_type, StringIO +from ansible.compat.six import string_types, text_type, binary_type from jinja2 import Environment from jinja2.loaders import FileSystemLoader from jinja2.exceptions import TemplateSyntaxError, UndefinedError diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 27bbb0f9e56..7d32c8082f4 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -23,9 +23,9 @@ __metaclass__ = type import copy import json import sys +from io import BytesIO from ansible.compat.tests import unittest -from ansible.compat.six import StringIO from ansible.module_utils import basic from ansible.module_utils.basic import heuristic_log_sanitize @@ -41,7 +41,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase): basic.MODULE_COMPLEX_ARGS = '{}' self.old_stdout = sys.stdout - self.fake_stream = StringIO() + self.fake_stream = BytesIO() sys.stdout = self.fake_stream self.module = basic.AnsibleModule(argument_spec=dict()) @@ -127,7 +127,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): def test_exit_json_removes_values(self): self.maxDiff = None for args, return_val, expected in self.dataset: - sys.stdout = StringIO() + sys.stdout = BytesIO() basic.MODULE_COMPLEX_ARGS = json.dumps(args) module = basic.AnsibleModule( argument_spec = dict( @@ -146,7 +146,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): expected = copy.deepcopy(expected) del expected['changed'] expected['failed'] = True - sys.stdout = StringIO() + sys.stdout = BytesIO() basic.MODULE_COMPLEX_ARGS = json.dumps(args) module = basic.AnsibleModule( argument_spec = dict( diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py index 0db6fbe7b94..191560e9616 100644 --- 
a/test/units/module_utils/basic/test_run_command.py +++ b/test/units/module_utils/basic/test_run_command.py @@ -22,16 +22,16 @@ __metaclass__ = type import errno import sys import time +from io import BytesIO from ansible.compat.tests import unittest -from ansible.compat.six import StringIO, BytesIO from ansible.compat.tests.mock import call, MagicMock, Mock, patch, sentinel from ansible.module_utils import basic from ansible.module_utils.basic import AnsibleModule -class OpenStringIO(StringIO): - """StringIO with dummy close() method +class OpenBytesIO(BytesIO): + """BytesIO with dummy close() method So that you can inspect the content after close() was called. """ @@ -77,7 +77,7 @@ class TestAnsibleModuleRunCommand(unittest.TestCase): self.subprocess = patch('ansible.module_utils.basic.subprocess').start() self.cmd = Mock() self.cmd.returncode = 0 - self.cmd.stdin = OpenStringIO() + self.cmd.stdin = OpenBytesIO() self.cmd.stdout.fileno.return_value = sentinel.stdout self.cmd.stderr.fileno.return_value = sentinel.stderr self.subprocess.Popen.return_value = self.cmd diff --git a/test/units/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py index 8fd617eea19..48a60b6c537 100644 --- a/test/units/parsing/yaml/test_loader.py +++ b/test/units/parsing/yaml/test_loader.py @@ -20,8 +20,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from io import StringIO + from six import text_type, binary_type -from six.moves import StringIO from collections import Sequence, Set, Mapping from ansible.compat.tests import unittest @@ -44,7 +45,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): pass def test_parse_number(self): - stream = StringIO(""" + stream = StringIO(u""" 1 """) loader = AnsibleLoader(stream, 'myfile.yml') @@ -53,7 +54,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): # No line/column info saved yet def test_parse_string(self): - stream = StringIO(""" + stream = StringIO(u""" Ansible """) loader = AnsibleLoader(stream, 'myfile.yml') @@ -64,7 +65,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) def test_parse_utf8_string(self): - stream = StringIO(""" + stream = StringIO(u""" Cafè Eñyei """) loader = AnsibleLoader(stream, 'myfile.yml') @@ -75,7 +76,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) def test_parse_dict(self): - stream = StringIO(""" + stream = StringIO(u""" webster: daniel oed: oxford """) @@ -93,7 +94,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22)) def test_parse_list(self): - stream = StringIO(""" + stream = StringIO(u""" - a - b """) @@ -109,7 +110,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19)) def test_parse_short_dict(self): - stream = StringIO("""{"foo": "bar"}""") + stream = StringIO(u"""{"foo": "bar"}""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) @@ -117,7 +118,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9)) - stream = StringIO("""foo: bar""") + stream = StringIO(u"""foo: bar""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) @@ -126,12 +127,12 @@ class 
TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6)) def test_error_conditions(self): - stream = StringIO("""{""") + stream = StringIO(u"""{""") loader = AnsibleLoader(stream, 'myfile.yml') self.assertRaises(ParserError, loader.get_single_data) def test_front_matter(self): - stream = StringIO("""---\nfoo: bar""") + stream = StringIO(u"""---\nfoo: bar""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, dict(foo=u'bar')) @@ -140,7 +141,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6)) # Initial indent (See: #6348) - stream = StringIO(""" - foo: bar\n baz: qux""") + stream = StringIO(u""" - foo: bar\n baz: qux""") loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}]) @@ -154,7 +155,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): class TestAnsibleLoaderPlay(unittest.TestCase): def setUp(self): - stream = StringIO(""" + stream = StringIO(u""" - hosts: localhost vars: number: 1 diff --git a/test/units/plugins/connections/test_connection.py b/test/units/plugins/connections/test_connection.py index 10fa44216dc..370768891d5 100644 --- a/test/units/plugins/connections/test_connection.py +++ b/test/units/plugins/connections/test_connection.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from six import StringIO +from io import StringIO from ansible.compat.tests import unittest from ansible.playbook.play_context import PlayContext From e01b6ad6b472c191e316c4a8405801f56e9427fb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 16:57:32 -0800 Subject: [PATCH 0739/1113] We are actually taking the repr of a string so we need separate to_bytes and repr calls --- lib/ansible/executor/module_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index e6fa2148b8d..9d50be0ca32 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -169,7 +169,7 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal python_repred_args = to_bytes(repr(module_args_json)) # these strings should be part of the 'basic' snippet which is required to be included - module_data = module_data.replace(REPLACER_VERSION, to_bytes(__version__, nonstring='repr')) + module_data = module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__))) module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args) module_data = module_data.replace(REPLACER_WINARGS, module_args_json) module_data = module_data.replace(REPLACER_JSONARGS, module_args_json) From 7cb29cdbec631feec0569d9da6d8b0cdfa1f3c05 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 26 Feb 2016 17:59:00 -0800 Subject: [PATCH 0740/1113] Workaround py2.6's StringIO --- test/units/parsing/yaml/test_loader.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/units/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py index 48a60b6c537..253ba4d5914 100644 --- a/test/units/parsing/yaml/test_loader.py +++ b/test/units/parsing/yaml/test_loader.py @@ -36,6 +36,13 @@ except ImportError: from yaml.parser import ParserError +class NameStringIO(StringIO): + """In py2.6, StringIO 
doesn't let you set name because a baseclass has it + as readonly property""" + name = None + def __init__(self, *args, **kwargs): + super(NameStringIO, self).__init__(*args, **kwargs) + class TestAnsibleLoaderBasic(unittest.TestCase): def setUp(self): @@ -155,7 +162,7 @@ class TestAnsibleLoaderBasic(unittest.TestCase): class TestAnsibleLoaderPlay(unittest.TestCase): def setUp(self): - stream = StringIO(u""" + stream = NameStringIO(u""" - hosts: localhost vars: number: 1 From cedf37c53193b766b1d14fd593cc66fc9e15200b Mon Sep 17 00:00:00 2001 From: Jun Matsushita <jun@iilab.org> Date: Sat, 27 Feb 2016 12:36:08 +0100 Subject: [PATCH 0741/1113] Small type --- ticket_stubs/module_repo.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ticket_stubs/module_repo.md b/ticket_stubs/module_repo.md index 13791eaaa2e..eb9c96da044 100644 --- a/ticket_stubs/module_repo.md +++ b/ticket_stubs/module_repo.md @@ -6,7 +6,7 @@ Hi! Thanks very much for your interest in Ansible. It sincerely means a lot to us. This appears to be a submission about a module, and aside from action_plugins, if you know what those are, the modules -in ansible are now moved two separate repos. We would appreciate if you can submit this there instead. +in ansible are now moved to separate repos. We would appreciate if you can submit this there instead. If this is about a new module, submit pull requests or ideas to: From de0e7de15c4171925ccee5de56d30962f36c3387 Mon Sep 17 00:00:00 2001 From: Jun Matsushita <jun@iilab.org> Date: Sat, 27 Feb 2016 12:39:58 +0100 Subject: [PATCH 0742/1113] Updated versions with current released and development --- docsite/rst/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 4f77125bb9a..4a11328a2f3 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments, so we design for Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, we note in each section the version of Ansible where the feature was added. +This documentation covers the current released version of Ansible (2.0.1) and also some development version features (2.1). For recent features, we note in each section the version of Ansible where the feature was added. Ansible, Inc. releases a new major release of Ansible approximately every two months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. However, the community around new modules and plugins being developed and contributed moves very quickly, typically adding 20 or so new modules in each release. 
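An aside on the `NameStringIO` workaround introduced two patches back: the pattern it relies on can be shown with a tiny self-contained sketch (the class and file names here are made up for illustration). Consumers such as YAML loaders frequently read `stream.name` when reporting errors, and a thin subclass is enough to let an in-memory stream carry one:

```python
# Illustrative sketch only; not the test suite's actual helper class.
from io import StringIO

class NamedStringIO(StringIO):
    # Declaring the attribute on the subclass lets instances assign it,
    # which, per the commit above, a plain py2.6 StringIO could not do.
    name = None

stream = NamedStringIO(u"foo: bar")
stream.name = 'myfile.yml'
print(stream.name)    # myfile.yml
print(stream.read())  # foo: bar
```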
From e58843706742d3fcd6c1905e57bf130d8b0220de Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 27 Feb 2016 10:02:45 -0500 Subject: [PATCH 0743/1113] Adding some unicode params to the ActionBase module formatting test --- test/units/plugins/action/test_action.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 57aa38c20a9..6b782e1322c 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python # -*- coding: utf-8 -*- # (c) 2015, Florian Apolloner <florian@apolloner.eu> # @@ -209,7 +210,7 @@ class TestActionBase(unittest.TestCase): # test python module formatting with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))) as m: - mock_task.args = dict(a=1) + mock_task.args = dict(a=1, foo='fö〩') mock_connection.module_implementation_preferences = ('',) (style, shebang, data) = action_base._configure_module(mock_task.action, mock_task.args) self.assertEqual(style, "new") From 192e22f6f41a022bd69f48c03fdbe6898d6b0598 Mon Sep 17 00:00:00 2001 From: Yannik <yannik@sembritzki.me> Date: Sun, 28 Feb 2016 10:11:07 +0100 Subject: [PATCH 0744/1113] fix typo --- docsite/rst/become.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 93c96d9a3c3..8fb10759958 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -71,7 +71,7 @@ For those from Pre 1.9 , sudo and su still work! For those using old playbooks will not need to be changed, even though they are deprecated, sudo and su directives, variables and options will continue to work. It is recommended to move to become as they may be retired at one point. -ou cannot mix directives on the same object (become and sudo) though, Ansible will complain if you try to. +Xou cannot mix directives on the same object (become and sudo) though, Ansible will complain if you try to. Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the new ones. From 767856c46354e57fd2dc70c2fa21c8660bab0a31 Mon Sep 17 00:00:00 2001 From: Yannik <yannik@sembritzki.me> Date: Sun, 28 Feb 2016 22:19:20 +0100 Subject: [PATCH 0745/1113] fix typo --- docsite/rst/become.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 8fb10759958..4a5f9730153 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -71,7 +71,7 @@ For those from Pre 1.9 , sudo and su still work! For those using old playbooks will not need to be changed, even though they are deprecated, sudo and su directives, variables and options will continue to work. It is recommended to move to become as they may be retired at one point. -Xou cannot mix directives on the same object (become and sudo) though, Ansible will complain if you try to. +You cannot mix directives on the same object (become and sudo) though, Ansible will complain if you try to. Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the new ones. 
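Connecting this to patch 0743 above: the point of feeding a non-ASCII argument like `fö〩` into the module-formatting test is that serialized module arguments must survive the trip from text JSON to the bytes spliced into the module source. A rough standalone illustration (not ActionBase code; the names are hypothetical):

```python
# Sketch only: shows that non-ASCII module args still serialize to clean bytes.
import json

args = dict(a=1, foo=u'fö〩')

# json.dumps escapes non-ASCII by default (ensure_ascii=True), so the payload
# is ASCII-safe text; encoding it gives the bytes a module file can embed.
payload = json.dumps(args).encode('utf-8')
assert isinstance(payload, bytes)
print(payload)
```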
From f67bf3f77570e6f0f57d04dc206a86016d42a3f7 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sat, 27 Feb 2016 16:42:43 -0500 Subject: [PATCH 0746/1113] updates the ops_template plugin action backup key This commit changes the key the ops_template will search for in order to backup the current configuration to local disk on the Ansible control host. This change was made to make ops_template consistent with the other network template modules. --- lib/ansible/plugins/action/ops_template.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/ops_template.py b/lib/ansible/plugins/action/ops_template.py index 84924fdb742..4b45c03f5c6 100644 --- a/lib/ansible/plugins/action/ops_template.py +++ b/lib/ansible/plugins/action/ops_template.py @@ -35,15 +35,15 @@ class ActionModule(NetActionModule, ActionBase): if isinstance(self._task.args['src'], basestring): self._handle_template() - self._task.args['config'] = task_vars.get('config') - result.update(self._execute_module(module_name=self._task.action, module_args=self._task.args, task_vars=task_vars)) - if self._task.args.get('backup') and '_config' in result: - contents = json.dumps(result['_config'], indent=4) + if self._task.args.get('backup') and result.get('_backup'): + contents = json.dumps(result['_backup'], indent=4) self._write_backup(task_vars['inventory_hostname'], contents) - del result['_config'] + + if '_backup' in result: + del result['_backup'] return result From 78f4d33c94b8c9990e9b618a1daedc6951bf7fee Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 24 Feb 2016 13:01:04 -0500 Subject: [PATCH 0747/1113] Add docker_container module proposal. --- .../docker/docker_container_moduler.md | 525 ++++++++++++++++++ 1 file changed, 525 insertions(+) create mode 100644 docs/proposals/docker/docker_container_moduler.md diff --git a/docs/proposals/docker/docker_container_moduler.md b/docs/proposals/docker/docker_container_moduler.md new file mode 100644 index 00000000000..f77966e9869 --- /dev/null +++ b/docs/proposals/docker/docker_container_moduler.md @@ -0,0 +1,525 @@ +# Docker_Container Module Proposal + +## Purpose and Scope: + +The purpose of docker_container is to manage the lifecycle of a container. The module will provide a mechanism for +moving the container between absent, present, stopped and started states. It will focus purely on managing container +state. The intention of the narrow focus is to make understanding and using the module clear and to keep maintenance +and testing as easy as possible. + +Docker_container will manage a container using docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar to +how other cloud modules operate. + +The container world is moving rapidly, so the goal is to create a suite of docker modules that keep pace, with docker_container +leading the way. If this project is successful, it will naturally deprecate the existing docker module. + +## Parameters: + +Docker_container will accept the parameters listed below. An attempt has been made to represent all the options available to +docker's create, kill, pause, run, rm, start, stop and update commands. + +Parameters related to connecting to the API are not listed here. 
+ +``` +blkio_weight: + description: + - Block IO weight + default: null + +blkio_weight_devices: + description: + - List of C(DEVICE_NAME:WEIGHT) pairs + default: null + +capabilities: + description: + - List of capabilities to add to the container. + default: null + +capabilities_drop: + description: + - List of capabilities to remove from the container + default: null + +cgroup_parent: + description: + - Optional parent cgroup for the container + default: null + +command: + description: + - Command executed in the container when it starts. + default: null + +cpu_period: + description: + - Limit CPU CFS (Completely Fair Scheduler) period. Expressed in milliseconds. + default: 0 + +cpu_quota: + description: + - Limit CPU CFS (Completely Fair Scheduler) quota. Expressed in milliseconds. + default: 0 + +cpuset_cpus: + description: + - CPUs in which to allow execution C(1,3) or C(1-3). + default: null + +cpuset_mems: + description: + - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1) + default: null + +cpu_shares: + description: + - CPU shares. Integer value. + default: 0 + +detach: + description: + - Enable detached mode to leave the container running in background. + If disabled, fail unless the process exits cleanly. + default: true + +devices: + description: + - List of host devices to add to the container + default: null + +device_read_bps + description: + - List. Limit read rate (bytes per second) from a device in the format C(/dev/sda:1mb) + default: null + +device_read_iops: + description: + - List. Limit read rate (IO per second) from a device in the format C(/dev/sda:1000) + default: null + +device_write_bps: + description: + - List. Limit write rate (bytes per second) to a device in the foramt C(/dev/sda:1mb) + default: null + +device_write_iops: + description: + - List. Limit write rate (IO per second) to a device C(/dev/sda:1000) + default: null + +dns_servers: + description: + - List of custom DNS servers. + default: null + +dns_opts: + description: + - List of custom DNS options. Each option is written as an options line + into the container's /etc/resolv.conf. + default: null + +dns_search_domains: + description: + - List of custom DNS search domains. + default: null + +env_vars: + description: + - Dictionary of key,value pairs. + default: null + +entrypoint: + description: + - Overwrite the default ENTRYPOINT of the image. + default: null + +etc_hosts: + description: + - List of custom host-to-IP mappings, with each mapping in the format C(host:ip), to be + added to the container's /etc/hosts file. + default: null + +exposed_ports: + description: + - List of additional container ports to expose for port mappings or links. + If the port is already exposed using EXPOSE in a Dockerfile, it does not + need to be exposed again. + default: null + +force_kill: + description: + - Use with absent, present, started and stopped states to use the kill command rather + than the stop command. + default: false + +groups: + description: + - List of additional groups to join. + default: null + +hostname: + description: + - Container hostname. + default: null + +image: + description: + - Container image used to create and match containers. + required: true + +interactive: + description: + - Keep stdin open after a container is launched, even if not attached. + default: false + +ip_address: + description: + - Container IPv4 address. + default: null + +ipv6_address: + description: + - Container IPv6 address. 
+ default: null + +ipc_namespace: + description: + - Container IPC namespace. + default: null + +keep_volumes: + description: + - Retain volumes associated with a removed container. + default: false + +kill_signal: + description: + - Signal used to kill a running container when state is absent. + default: KILL + +kernel_memory: + description: + - Kernel memory limit (format: <number>[<unit>]). Number is a positive integer. + Unit can be one of b, k, m, or g. Minimum is 4M. + default: 0 + +labels: + description: + - Set meta data on the cotnainer. Dictionary of key value paris: + default: null + +links: + description: + - List of name aliases for linked containers in the format C(redis:myredis) + default: null + +log_driver: + description: + - Specify the logging driver. + choices: + - json-file + - syslog + - journald + - gelf + - fluentd + - awslogs + - splunk + defult: json-file + +log_opt: + description: + - Additional options to pass to the logging driver selected above. See Docker `log-driver + <https://docs.docker.com/reference/logging/overview/>` documentation for more information. + required: false + default: null + +mac_address: + description: + - Container MAC address (e.g. 92:d0:c6:0a:29:33) +default: null + +memory: + description: + - Memory limit (format: <number>[<unit>]). Number is a positive integer. + Unit can be one of b, k, m, or g + default: 0 + +memory_reservation: + description: + - Memory soft limit (format: <number>[<unit>]). Number is a positive integer. + Unit can be one of b, k, m, or g + default: 0 + +memory_swap: + description: + - Total memory limit (memory + swap, format:<number>[<unit>]). + Number is a positive integer. Unit can be one of b, k, m, or g. + default: 0 + +memory_swapiness: + description: + - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + default: 0 + +name: + description: + - Assign a name to a new container or match an existing container. + - When identifying an existing container name may be a name or a long or short container ID. + required: true + +net: + description: + - Connect the container to a network. + choices: + - bridge + - container:<name|id> + - host + default: null + +net_alias: + description: + - Add network scoped alias to the container. + default: null + +paused: + description: + - Use with the started state to pause running processes inside the container. + default: false + +pid: + description: + - Set the PID namespace mode for the container. Currenly only supports 'host'. + default: null + +privileged: + description: + - Give extended privileges to the container. + default: false + +published_ports: + description: + - List of ports to publish from the container to the host. + - Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a + container port, 9000 is a host port, and 0.0.0.0 is a host interface. + - Container ports must be exposed either in the Dockerfile or via the C(expose) option. + - A value of ALL will publish all exposed container ports to random host ports, ignoring + any other mappings. + +read_only: + description: + - Mount the container's root file system as read-only. + default: false + +recreate: + description: + - Use with present and started states to force the re-creation of an existing container. + default: false + +restart: + description: + - Use with started state to force a matching container to be stopped and restarted. + default: false + +restart_policy: + description: + - Container restart policy. 
+ choices: + - no + - on-failure + - always + - unless-stopped + default: no + +restart_policy_retry: + description: + - When C(restart_policy) is on-failure sets the max number of retries. + default: 0 + +shm_size: + description: + - Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). + - Ommitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`. + default: null + +security_opts: + description: + - List of security options in the form of C("label:user:User") + default: null + +state: + description: + - "absent" - A container matching the specified name will be stopped and removed. Use force_kill to kill the container + rather than stopping it. Use keep_volumes to retain volumes associated with the removed container. + + - "present" - Asserts the existence of a container matching the name and any provided configuration parameters. If no + container matches the name, a container will be created. If a container matches the name but the provided configuration + does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created + with the requested config. Use recreate to force the re-creation of the matching container. Use force_kill to kill the + container rather than stopping it. Use keep_volumes to retain volumes associated with a removed container. + + - "started" - Asserts there is a running container matching the name and any provided configuration. If no container + matches the name, a container will be created and started. If a container matching the name is found but the + configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed + and a new container will be created with the requested configuration and started. Use recreate to always re-create a + matching container, even if it is running. Use restart to force a matching container to be stopped and restarted. Use + force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated with a removed + container. + + - "stopped" - a container matching the specified name will be stopped. Use force_kill to kill a container rather than + stopping it. + + required: false + default: started + choices: + - absent + - present + - stopped + - started + +stop_signal: + description: + - Signal used to stop the container. + default: SIGINT + +stop_timeout: + description: + - Number of seconds to wait for the container to stop before sending SIGKILL. + required: false + +trust_image_content: + description: + - If true, skip image verification. + default: false + +tty: + description: + - Allocate a psuedo-TTY. + default: false + +ulimit: + description: + - List of ulimit options. A ulimit is specified as C(nofile=262144:262144) + default: null + +user: + description + - Sets the username or UID used and optionally the groupname or GID for the specified command. + - Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ] + default: null + +uts: + description: + - Set the UTS namespace mode for the container. + default: null + +volumes: + description: + - List of volumes to mount within the container. + - 'Use docker CLI-style syntax: C(/host:/container[:mode])' + - You can specify a read mode for the mount with either C(ro) or C(rw). 
+ Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z) + mount options to use a shared or private label for the volume. +default: null + +volumes_from: + description: + - List of container names to mount volumes from. + default: null +``` + + +## Examples: + +``` +- name: Create a data container + docker_container: + name: mydata + image: busybox + volumes: + - /data + +- name: Re-create a redis container + docker_container: + name: myredis + image: redis + command: redis-server --appendonly yes + state: present + recreate: yes + expose: + - 6379 + volumes_from: + - mydata + +- name: Restart a container + docker_container: + name: myapplication + image: someuser/appimage + state: started + restart: yes + links: + - "myredis:aliasedredis" + devices: + - "/dev/sda:/dev/xvda:rwm" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + + +- name: Container present + docker_container: + name: mycontainer + state: present + recreate: yes + forcekill: yes + image: someplace/image + command: echo "I'm here!" + + +- name: Start 4 load-balanced containers + docker_container: + name: "container{{ item }}" + state: started + recreate: yes + image: someuser/anotherappimage + command: sleep 1d + with_sequence: count=4 + +-name: remove container + docker_container: + name: ohno + state: absent + +- name: Syslogging output + docker_container: + name: myservice + state: started + log_driver: syslog + log_opt: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + syslog-tag: myservice + +``` + +## Returns: + +The JSON object returned by the module will include a *results* object providing `docker inspect` output for the affected container. + +``` +{ + changed: True, + results: { + < the results of `docker inspect` > + } +} +``` + +## Contributors + +[chouseknecht](http://twitter.com/chouseknecht) + +*Last Updated:* 2016-02-24 From e27eb73cba96deecc2264bfb5ac36cffe761e888 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 29 Feb 2016 09:05:48 -0600 Subject: [PATCH 0748/1113] sys.subversion unavailable in py24. Fixes #14704 --- lib/ansible/module_utils/facts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 23007e1689e..974d47a55d9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -808,9 +808,12 @@ class Facts(object): }, 'version_info': list(sys.version_info), 'executable': sys.executable, - 'type': sys.subversion[0], 'has_sslcontext': HAS_SSLCONTEXT } + try: + self.facts['python']['type'] = sys.subversion[0] + except AttributeError: + self.facts['python']['type'] = None class Hardware(Facts): From 35880e625435c573c4f4f4f3bfb94e768c6523f8 Mon Sep 17 00:00:00 2001 From: Vladimir Rutsky <rutsky@users.noreply.github.com> Date: Mon, 29 Feb 2016 18:21:05 +0300 Subject: [PATCH 0749/1113] fix reference to chapter + trailing dot --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 22be4abeef2..e0fb548d3b7 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -936,7 +936,7 @@ how all of these things can work together. 
Advanced Syntax ``````````````` -For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see `:doc:playbooks_advanced_syntax` +For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see :doc:`playbooks_advanced_syntax`. .. seealso:: From 6c7680b6333985fe1d5d4a9dc27c49c5c3bd0094 Mon Sep 17 00:00:00 2001 From: brianlycett <brian.lycett@ontrackretail.co.uk> Date: Mon, 29 Feb 2016 16:04:45 +0000 Subject: [PATCH 0750/1113] Update YAMLSyntax.rst Small updates loosely based on suggestions. :) --- docsite/rst/YAMLSyntax.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index b8d19099eb3..c5d94e50ff0 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -42,7 +42,7 @@ A dictionary is represented in a simple ``key: value`` form (the colon must be f job: Developer skill: Elite -More complicated data structures are possible, such as lists of dictionaries, or dictionaries whose values are lists. Or a mix of both:: +More complicated data structures are possible, such as lists of dictionaries, dictionaries whose values are lists or a mix of both:: # Employee records - martin: @@ -77,7 +77,7 @@ Ansible doesn't really use these too much, but you can also specify a boolean va likes_emacs: TRUE uses_cvs: false -Values can span multiple lines using *|* or *>*. Spanning multiple lines using a *|* will include the newlines. Using a *>* will ignore newlines; it's used to make what would otherwise be a very long line easier to read and edit. +Values can span multiple lines using `|` or `>`. Spanning multiple lines using a `|` will include the newlines. Using a `>` will ignore newlines; it's used to make what would otherwise be a very long line easier to read and edit. In either case the indentation will be ignored. Examples are:: From 18e4a02c25940bbc8ca71a9b24e3a30933b65877 Mon Sep 17 00:00:00 2001 From: brianlycett <brian.lycett@ontrackretail.co.uk> Date: Mon, 29 Feb 2016 16:06:06 +0000 Subject: [PATCH 0751/1113] Update YAMLSyntax.rst Ach - it's double backquotes in this markup. --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index c5d94e50ff0..219d78e4143 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -77,7 +77,7 @@ Ansible doesn't really use these too much, but you can also specify a boolean va likes_emacs: TRUE uses_cvs: false -Values can span multiple lines using `|` or `>`. Spanning multiple lines using a `|` will include the newlines. Using a `>` will ignore newlines; it's used to make what would otherwise be a very long line easier to read and edit. +Values can span multiple lines using ``|`` or ``>``. Spanning multiple lines using a ``|`` will include the newlines. Using a ``>`` will ignore newlines; it's used to make what would otherwise be a very long line easier to read and edit. In either case the indentation will be ignored. 
Examples are:: From d0a717694fa12ad5cff10fd683df1345fa165505 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 29 Feb 2016 10:12:17 -0600 Subject: [PATCH 0752/1113] Fix variable name in paramiko connection plugin --- lib/ansible/plugins/connection/paramiko_ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 9c061afae7c..5f311756e83 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -149,7 +149,7 @@ class Connection(ConnectionBase): getattr(self._play_context, 'ssh_common_args', ''), getattr(self._play_context, 'ssh_args', ''), ] - if ssh_common_args is not None: + if ssh_args is not None: args = self._split_ssh_args(' '.join(ssh_args)) for i, arg in enumerate(args): if arg.lower() == 'proxycommand': From a9d25f455c705fe30798c2f6fe3afe74e74e1979 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 29 Feb 2016 12:59:44 -0500 Subject: [PATCH 0753/1113] More unit tests for ActionBase --- test/units/plugins/action/test_action.py | 322 ++++++++++++++++++++++- 1 file changed, 321 insertions(+), 1 deletion(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 6b782e1322c..85ac29e4ca6 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -24,6 +24,7 @@ __metaclass__ = type import ast import json import pipes +import os from sys import version_info if version_info[0] == 2: @@ -144,6 +145,7 @@ if version_info >= (3,) and version_info < (3, 4, 4): class DerivedActionBase(ActionBase): + TRANSFERS_FILES = False def run(self, tmp=None, task_vars=None): # We're not testing the plugin run() method, just the helper # methods ActionBase defines @@ -283,7 +285,325 @@ class TestActionBase(unittest.TestCase): mock_task.environment = ['hi there'] self.assertRaises(AnsibleError, action_base._compute_environment_string) - def test_sudo_only_if_user_differs(self): + def test_action_base__early_needs_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + self.assertFalse(action_base._early_needs_tmp_path()) + + action_base.TRANSFERS_FILES = True + self.assertTrue(action_base._early_needs_tmp_path()) + + def test_action_base__late_needs_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + # assert no temp path is required because tmp is set + self.assertFalse(action_base._late_needs_tmp_path("/tmp/foo", "new")) + + # assert no temp path is required when using a new-style module + # with pipelining supported and enabled with no become method + mock_connection.has_pipelining = True + play_context.pipelining = True + 
play_context.become_method = None + self.assertFalse(action_base._late_needs_tmp_path(None, "new")) + + # assert a temp path is required for each of the following: + # the module style is not 'new' + mock_connection.has_pipelining = True + play_context.pipelining = True + play_context.become_method = None + self.assertTrue(action_base._late_needs_tmp_path(None, "old")) + # connection plugin does not support pipelining + mock_connection.has_pipelining = False + play_context.pipelining = True + play_context.become_method = None + self.assertTrue(action_base._late_needs_tmp_path(None, "new")) + # pipelining is disabled via the play context settings + mock_connection.has_pipelining = True + play_context.pipelining = False + play_context.become_method = None + self.assertTrue(action_base._late_needs_tmp_path(None, "new")) + # keep remote files is enabled + # FIXME: implement + # the become method is 'su' + mock_connection.has_pipelining = True + play_context.pipelining = True + play_context.become_method = 'su' + self.assertTrue(action_base._late_needs_tmp_path(None, "new")) + + def test_action_base__make_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection.transport = 'ssh' + mock_connection._shell.mkdtemp.return_value = 'mkdir command' + mock_connection._shell.join_path.side_effect = os.path.join + + # we're using a real play context here + play_context = PlayContext() + play_context.become = True + play_context.become_user = 'foo' + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._low_level_execute_command = MagicMock() + action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path') + self.assertEqual(action_base._make_tmp_path(), '/some/path/') + + # empty path fails + action_base._low_level_execute_command.return_value = dict(rc=0, stdout='') + self.assertRaises(AnsibleError, action_base._make_tmp_path) + + # authentication failure + action_base._low_level_execute_command.return_value = dict(rc=5, stdout='') + self.assertRaises(AnsibleError, action_base._make_tmp_path) + + # ssh error + action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='') + self.assertRaises(AnsibleError, action_base._make_tmp_path) + play_context.verbosity = 5 + self.assertRaises(AnsibleError, action_base._make_tmp_path) + + # general error + action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='') + self.assertRaises(AnsibleError, action_base._make_tmp_path) + action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device') + self.assertRaises(AnsibleError, action_base._make_tmp_path) + + def test_action_base__remove_tmp_path(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection._shell.remove.return_value = 'rm some stuff' + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._low_level_execute_command = 
MagicMock() + # these don't really return anything or raise errors, so + # we're pretty much calling these for coverage right now + action_base._remove_tmp_path('/bad/path/dont/remove') + action_base._remove_tmp_path('/good/path/to/ansible-tmp-thing') + + @patch('os.unlink') + @patch('os.fdopen') + @patch('tempfile.mkstemp') + def test_action_base__transfer_data(self, mock_mkstemp, mock_fdopen, mock_unlink): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + mock_connection.put_file.return_value = None + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + mock_afd = MagicMock() + mock_afile = MagicMock() + mock_mkstemp.return_value = (mock_afd, mock_afile) + + mock_unlink.return_value = None + + mock_afo = MagicMock() + mock_afo.write.return_value = None + mock_afo.flush.return_value = None + mock_afo.close.return_value = None + mock_fdopen.return_value = mock_afo + + self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some data'), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some mixed data: fö〩'), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='some value')), '/path/to/remote/file') + self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='fö〩')), '/path/to/remote/file') + + mock_afo.write.side_effect = Exception() + self.assertRaises(AnsibleError, action_base._transfer_data, '/path/to/remote/file', '') + + def test_action_base__execute_remote_stat(self): + # create our fake task + mock_task = MagicMock() + + # create a mock connection, so we don't actually try and connect to things + mock_connection = MagicMock() + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + action_base._execute_module = MagicMock() + + # test normal case + action_base._execute_module.return_value = dict(stat=dict(checksum='1111111111111111111111111111111111', exists=True)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertEqual(res['checksum'], '1111111111111111111111111111111111') + + # test does not exist + action_base._execute_module.return_value = dict(stat=dict(exists=False)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertFalse(res['exists']) + self.assertEqual(res['checksum'], '1') + + # test no checksum in result from _execute_module + action_base._execute_module.return_value = dict(stat=dict(exists=True)) + res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False) + self.assertTrue(res['exists']) + self.assertEqual(res['checksum'], '') + + # test stat call failed + action_base._execute_module.return_value = dict(failed=True, msg="because I said so") + self.assertRaises(AnsibleError, action_base._execute_remote_stat, path='/path/to/file', all_vars=dict(), follow=False) + + def test_action_base__execute_module(self): + # create our fake task + mock_task = MagicMock() 
+ mock_task.action = 'copy' + mock_task.args = dict(a=1, b=2, c=3) + + # create a mock connection, so we don't actually try and connect to things + def build_module_command(env_string, shebang, cmd, arg_path=None, rm_tmp=None): + to_run = [env_string, cmd] + if arg_path: + to_run.append(arg_path) + if rm_tmp: + to_run.append(rm_tmp) + return " ".join(to_run) + + mock_connection = MagicMock() + mock_connection.build_module_command.side_effect = build_module_command + mock_connection._shell.join_path.side_effect = os.path.join + + # we're using a real play context here + play_context = PlayContext() + + # our test class + action_base = DerivedActionBase( + task=mock_task, + connection=mock_connection, + play_context=play_context, + loader=None, + templar=None, + shared_loader_obj=None, + ) + + # fake a lot of methods as we test those elsewhere + action_base._configure_module = MagicMock() + action_base._supports_check_mode = MagicMock() + action_base._late_needs_tmp_path = MagicMock() + action_base._make_tmp_path = MagicMock() + action_base._transfer_data = MagicMock() + action_base._compute_environment_string = MagicMock() + action_base._remote_chmod = MagicMock() + action_base._low_level_execute_command = MagicMock() + + action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data') + action_base._late_needs_tmp_path.return_value = False + action_base._compute_environment_string.return_value = '' + action_base._connection.has_pipelining = True + action_base._low_level_execute_command.return_value = dict(stdout='{"rc": 0, "stdout": "ok"}') + self.assertEqual(action_base._execute_module(module_name=None, module_args=None), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + self.assertEqual(action_base._execute_module(module_name='foo', module_args=dict(z=9, y=8, x=7), task_vars=dict(a=1)), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + + # test with needing/removing a remote tmp path + action_base._configure_module.return_value = ('old', '#!/usr/bin/python', 'this is the module data') + action_base._late_needs_tmp_path.return_value = True + action_base._make_tmp_path.return_value = '/the/tmp/path' + self.assertEqual(action_base._execute_module(), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + + action_base._configure_module.return_value = ('non_native_want_json', '#!/usr/bin/python', 'this is the module data') + self.assertEqual(action_base._execute_module(), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + + play_context.become = True + play_context.become_user = 'foo' + self.assertEqual(action_base._execute_module(), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + + # test an invalid shebang return + action_base._configure_module.return_value = ('new', '', 'this is the module data') + action_base._late_needs_tmp_path.return_value = False + self.assertRaises(AnsibleError, action_base._execute_module) + + # test with check mode enabled, once with support for check + # mode and once with support disabled to raise an error + play_context.check_mode = True + action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data') + self.assertEqual(action_base._execute_module(), dict(rc=0, stdout="ok", stdout_lines=['ok'])) + action_base._supports_check_mode = False + self.assertRaises(AnsibleError, action_base._execute_module) + + def test_action_base_sudo_only_if_user_differs(self): play_context = PlayContext() action_base = DerivedActionBase(None, None, play_context, None, None, None) action_base._connection = 
MagicMock(exec_command=MagicMock(return_value=(0, '', ''))) From 7c049c3200915c97bb3da26814c45872f030e4ee Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 29 Feb 2016 14:51:23 -0500 Subject: [PATCH 0754/1113] Fixing up jsonify and adding unit tests --- lib/ansible/parsing/utils/jsonify.py | 8 ++--- test/units/parsing/utils/__init__.py | 21 ++++++++++++++ test/units/parsing/utils/test_jsonify.py | 37 ++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 6 deletions(-) create mode 100644 test/units/parsing/utils/__init__.py create mode 100644 test/units/parsing/utils/test_jsonify.py diff --git a/lib/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py index 59dbf9f8c4c..74e015a5cf0 100644 --- a/lib/ansible/parsing/utils/jsonify.py +++ b/lib/ansible/parsing/utils/jsonify.py @@ -29,17 +29,13 @@ def jsonify(result, format=False): if result is None: return "{}" - result2 = result.copy() - for key, value in result2.items(): - if type(value) is str: - result2[key] = value.decode('utf-8', 'ignore') indent = None if format: indent = 4 try: - return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False) + return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False) except UnicodeDecodeError: - return json.dumps(result2, sort_keys=True, indent=indent) + return json.dumps(result, sort_keys=True, indent=indent) diff --git a/test/units/parsing/utils/__init__.py b/test/units/parsing/utils/__init__.py new file mode 100644 index 00000000000..d3d562f5d32 --- /dev/null +++ b/test/units/parsing/utils/__init__.py @@ -0,0 +1,21 @@ +# (c) 2016, James Cammarata <jimi@sngx.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/parsing/utils/test_jsonify.py b/test/units/parsing/utils/test_jsonify.py new file mode 100644 index 00000000000..f1cf36ad50b --- /dev/null +++ b/test/units/parsing/utils/test_jsonify.py @@ -0,0 +1,37 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2016, James Cammarata <jimi@sngx.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.parsing.utils.jsonify import jsonify + +class TestJsonify(unittest.TestCase): + def test_jsonify_simple(self): + self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}') + + def test_jsonify_simple_format(self): + self.assertEqual(jsonify(dict(a=1, b=2, c=3), format=True), '{\n "a": 1, \n "b": 2, \n "c": 3\n}') + + def test_jsonify_unicode(self): + self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}') + + def test_jsonify_empty(self): + self.assertEqual(jsonify(None), '{}') From c56e3aabfb9014e15e81cacb3a98175087a34aaa Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 29 Feb 2016 15:08:59 -0500 Subject: [PATCH 0755/1113] Clean up jsonify unit test with format to remove json lib differences --- test/units/parsing/utils/test_jsonify.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/units/parsing/utils/test_jsonify.py b/test/units/parsing/utils/test_jsonify.py index f1cf36ad50b..4de92b8bad1 100644 --- a/test/units/parsing/utils/test_jsonify.py +++ b/test/units/parsing/utils/test_jsonify.py @@ -28,7 +28,9 @@ class TestJsonify(unittest.TestCase): self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}') def test_jsonify_simple_format(self): - self.assertEqual(jsonify(dict(a=1, b=2, c=3), format=True), '{\n "a": 1, \n "b": 2, \n "c": 3\n}') + res = jsonify(dict(a=1, b=2, c=3), format=True) + cleaned = "".join([x.strip() for x in res.splitlines()]) + self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}') def test_jsonify_unicode(self): self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}') From 67d5b77898872335f18fd58f3aa53798ef7a2e31 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 29 Feb 2016 15:21:17 -0500 Subject: [PATCH 0756/1113] added package to special 'squashable' fields apt has it as alias, this should fix https://github.com/ansible/ansible-modules-core/issues/3145 --- lib/ansible/executor/task_executor.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 94ad6786c02..b59295b0ed7 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -269,7 +269,12 @@ class TaskExecutor: if len(items) > 0 and task_action in self.SQUASH_ACTIONS: if all(isinstance(o, string_types) for o in items): final_items = [] - name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) + + name = None + for allowed in ['name', 'pkg', 'package']: + name = self._task.args.pop(allowed, None) + if name is not None: + break # This gets the information to check whether the name field # contains a template that we can squash for From 481d56b789f86b0edd7a4fe4908fb12b6f4e058f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 28 Jan 2016 16:32:19 -0500 Subject: [PATCH 0757/1113] More CHANGELOG updates for 2.0.1 --- CHANGELOG.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3889f124432..a67f7ad82e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,40 @@ Ansible Changes By Release * debug is now controlable with verbosity ## 2.0 "Over the Hills and Far Away" +## 2.0.1 "Over the Hills and Far Away" + +* Fixes a major compatibility break in the synchronize module shipped 
with + 2.0.0.x. That version of synchronize ran sudo on the controller prior to + running rsync. In 1.9.x and previous, sudo was run on the host that rsync + connected to. 2.0.1 restores the 1.9.x behaviour. +* Additionally, several other problems with where synchronize chose to run when + combined with delegate_to were fixed. In particular, if a playbook targetted + localhost and then delegated_to a remote host the prior behavior (in 1.9.x + and 2.0.0.x) was to copy files between the src and destination directories on + the delegated host. This has now been fixed to copy between localhost and + the delegated host. +* Fix a regression where synchronize was unable to deal with unicode paths. +* Fix a regression where synchronize deals with inventory hosts that use + localhost but with an alternate port. +* Fixes a regression where the retry files feature was not implemented. +* Fixes a regression where the any_errors_fatal option was implemented in 2.0 + incorrectly, and also adds a feature where any_errors_fatal can be set at + the block level. +* Fix tracebacks when playbooks or ansible itself were located in directories + with unicode characters. +* Fix bug when sending unicode characters to an external pager for display. +* Fix a bug with squashing loops for special modules (mostly package managers). + The optimization was squashing when the loop did not apply to the selection + of packages. This has now been fixed. +* Temp files created when using vault are now "shredded" using the unix shred + program which overwrites the file with random data. +* Some fixes to cloudstack modules for case sensitivity +* Fix non-newstyle modules (non-python modules and old-style modules) to + disabled pipelining. +* Fix fetch module failing even if fail_on_missing is set to False +* Fix for cornercase when local connections, sudo, and raw were used together. + +## 2.0 "Over the Hills and Far Away" - Jan 12, 2016 ###Major Changes: From 99240b2ee15f857bc1f131c0e2cae35061675b47 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 2 Feb 2016 10:51:41 -0800 Subject: [PATCH 0758/1113] Note the dnf fix --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a67f7ad82e4..8643aa1c478 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,9 @@ Ansible Changes By Release disabled pipelining. * Fix fetch module failing even if fail_on_missing is set to False * Fix for cornercase when local connections, sudo, and raw were used together. +* Fix dnf module to remove dependent packages when state=absent is specified. + This was a feature of the 1.9.x version that was left out by mistake when the + module was rewritten for 2.0. ## 2.0 "Over the Hills and Far Away" - Jan 12, 2016 From b897d926c977847b43fd2f99c94a9f9643b8fb1e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 7 Feb 2016 13:11:32 -0800 Subject: [PATCH 0759/1113] Add locale fixes to changelgo --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8643aa1c478..47ca04780d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ Ansible Changes By Release * Fix dnf module to remove dependent packages when state=absent is specified. This was a feature of the 1.9.x version that was left out by mistake when the module was rewritten for 2.0. 
+* Fix bugs with non-english locales in yum, git, and apt modules ## 2.0 "Over the Hills and Far Away" - Jan 12, 2016 From c1654adf10a7284bf311e8d699822d31fadfaceb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sat, 13 Feb 2016 02:09:08 -0800 Subject: [PATCH 0760/1113] Update changelog for dnf fix --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47ca04780d5..4c46441c7b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ Ansible Changes By Release This was a feature of the 1.9.x version that was left out by mistake when the module was rewritten for 2.0. * Fix bugs with non-english locales in yum, git, and apt modules +* Fix a bug with the dnf module where state=latest could only upgrade, not install. ## 2.0 "Over the Hills and Far Away" - Jan 12, 2016 From 1b11b396ca3e8446715ea2b60853a8f260efa4a2 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 29 Feb 2016 15:51:38 -0500 Subject: [PATCH 0761/1113] added note about fact gathering task change fixes #14655 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c46441c7b0..58b51a8fff4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,9 @@ Ansible Changes By Release module was rewritten for 2.0. * Fix bugs with non-english locales in yum, git, and apt modules * Fix a bug with the dnf module where state=latest could only upgrade, not install. +* Fix to make implicit fact gathering task correctly inherit settings from play, + this might cause an error if settings environment on play depending on 'ansible_env' + which was previouslly ignored ## 2.0 "Over the Hills and Far Away" - Jan 12, 2016 From 8766a9dbafc31e304a07dc87b91d30df428d0a2b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 29 Feb 2016 15:56:39 -0500 Subject: [PATCH 0762/1113] added new gather behaviour to porting guide also removed redundant header from changelog from merge --- CHANGELOG.md | 1 - docsite/rst/porting_guide_2.0.rst | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58b51a8fff4..3676a04952c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,7 +27,6 @@ Ansible Changes By Release * callbacks now have access to the options with which the CLI was called * debug is now controlable with verbosity -## 2.0 "Over the Hills and Far Away" ## 2.0.1 "Over the Hills and Far Away" * Fixes a major compatibility break in the synchronize module shipped with diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 8c95c9a4d06..ee6e65c150f 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -83,6 +83,8 @@ uses key=value escaping which has not changed. The other option is to check for * Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. * dnf module has been rewritten. Some minor changes in behavior may be observed. * win_updates has been rewritten and works as expected now. +* from 2.0.1 onwards, the implicit setup task from gather_facts now correctly inherits everything from play, but this might cause issues for those setting + `environment` at the play level and depending on `ansible_env` existing. Previouslly this was ignored but now might issue an 'Undefined' error. 
Deprecated ---------- From badc922c73e12eff21fd84a8b004f8997018367e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 29 Feb 2016 16:18:06 -0500 Subject: [PATCH 0763/1113] added warning for when host file doesn't exist fixes #14692 --- lib/ansible/inventory/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index d10a731faaf..c984546621e 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -133,6 +133,8 @@ class Inventory(object): if not self.parser: # should never happen, but JIC raise AnsibleError("Unable to parse %s as an inventory source" % host_list) + else: + display.warning("Host file not found: %s" % to_unicode(host_list)) self._vars_plugins = [ x for x in vars_loader.all(self) ] From e9fe5f201f5830167d2ca6168c08822c930efb41 Mon Sep 17 00:00:00 2001 From: Michael Crilly <mrcrilly@gmail.com> Date: Wed, 13 May 2015 15:35:08 +1000 Subject: [PATCH 0764/1113] $SubjectName variable unused; clean up Having used this script several times today, I came to notice the $SubjectName variable, being passed in via the CLI, is essentially ignored when generating the SSL certificates, rendering it useless. I believe it's a good idea to have it in place, so I've updated the script to reflect this. I also cleaned up some random new lines throughout the file, and expanded on a comment. It might be worth going a step further and commenting the file fully, as most people reviewing this file won't be familiar with PowerShell (like I wasn't unitl a few days ago). It could be helpful. --- .../scripts/ConfigureRemotingForAnsible.ps1 | 30 +++++++++---------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1 index a67ea8afb2c..a70dc0354df 100644 --- a/examples/scripts/ConfigureRemotingForAnsible.ps1 +++ b/examples/scripts/ConfigureRemotingForAnsible.ps1 @@ -1,4 +1,4 @@ -# Configure a Windows host for remote management with Ansible +# Configure a Windows host for remote management with Ansible # ----------------------------------------------------------- # # This script checks the current WinRM/PSRemoting configuration and makes the @@ -10,9 +10,11 @@ # # Written by Trond Hindenes <trond@hindenes.com> # Updated by Chris Church <cchurch@ansible.com> +# Updated by Michael Crilly <mike@autologic.cm> # # Version 1.0 - July 6th, 2014 # Version 1.1 - November 11th, 2014 +# Version 1.2 - May 15th, 2015 Param ( [string]$SubjectName = $env:COMPUTERNAME, @@ -20,7 +22,6 @@ Param ( $CreateSelfSignedCert = $true ) - Function New-LegacySelfSignedCert { Param ( @@ -60,11 +61,12 @@ Function New-LegacySelfSignedCert $certdata = $enrollment.CreateRequest(0) $enrollment.InstallResponse(2, $certdata, 0, "") - # Return the thumbprint of the last installed cert. + # Return the thumbprint of the last installed certificate; + # This is needed for the new HTTPS WinRM listerner we're + # going to create further down. Get-ChildItem "Cert:\LocalMachine\my"| Sort-Object NotBefore -Descending | Select -First 1 | Select -Expand Thumbprint } - # Setup error handling. Trap { @@ -73,14 +75,12 @@ Trap } $ErrorActionPreference = "Stop" - # Detect PowerShell version. If ($PSVersionTable.PSVersion.Major -lt 3) { Throw "PowerShell version 3 or higher is required." } - # Find and start the WinRM service. Write-Verbose "Verifying WinRM service." 
If (!(Get-Service "WinRM")) @@ -93,7 +93,6 @@ ElseIf ((Get-Service "WinRM").Status -ne "Running") Start-Service -Name "WinRM" -ErrorAction Stop } - # WinRM should be running; check that we have a PS session config. If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) { @@ -112,17 +111,19 @@ If (!($listeners | Where {$_.Keys -like "TRANSPORT=HTTPS"})) # HTTPS-based endpoint does not exist. If (Get-Command "New-SelfSignedCertificate" -ErrorAction SilentlyContinue) { - $cert = New-SelfSignedCertificate -DnsName $env:COMPUTERNAME -CertStoreLocation "Cert:\LocalMachine\My" + $cert = New-SelfSignedCertificate -DnsName $SubjectName -CertStoreLocation "Cert:\LocalMachine\My" $thumbprint = $cert.Thumbprint + Write-Host "Self-signed SSL certificate generated; thumbprint: $thumbprint" } Else { - $thumbprint = New-LegacySelfSignedCert -SubjectName $env:COMPUTERNAME + $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName + Write-Host "(Legacy) Self-signed SSL certificate generated; thumbprint: $thumbprint" } # Create the hashtables of settings to be used. $valueset = @{} - $valueset.Add('Hostname', $env:COMPUTERNAME) + $valueset.Add('Hostname', $SubjectName) $valueset.Add('CertificateThumbprint', $thumbprint) $selectorset = @{} @@ -137,7 +138,6 @@ Else Write-Verbose "SSL listener is already active." } - # Check for basic authentication. $basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where {$_.Name -eq "Basic"} If (($basicAuthSetting.Value) -eq $false) @@ -150,7 +150,6 @@ Else Write-Verbose "Basic auth is already enabled." } - # Configure firewall to allow WinRM HTTPS connections. $fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" $fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any @@ -177,19 +176,18 @@ $httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $h If ($httpResult -and $httpsResult) { - Write-Verbose "HTTP and HTTPS sessions are enabled." + Write-Verbose "HTTP: Enabled | HTTPS: Enabled" } ElseIf ($httpsResult -and !$httpResult) { - Write-Verbose "HTTP sessions are disabled, HTTPS session are enabled." + Write-Verbose "HTTP: Disabled | HTTPS: Enabled" } ElseIf ($httpResult -and !$httpsResult) { - Write-Verbose "HTTPS sessions are disabled, HTTP session are enabled." + Write-Verbose "HTTP: Enabled | HTTPS: Disabled" } Else { Throw "Unable to establish an HTTP or HTTPS remoting session." } - Write-Verbose "PS Remoting has been successfully configured for Ansible." From d699ef72d22874413c4c90d38f89a9a16c663154 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Mon, 29 Feb 2016 14:37:01 -0800 Subject: [PATCH 0765/1113] Fixes #12433 with distro-agnostic text --- docsite/rst/intro_windows.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index bf4256cf34e..062e54fb666 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -28,6 +28,8 @@ On a Linux control machine:: pip install "pywinrm>=0.1.1" +Note:: on distributions with multiple python versions, use pip2 or pip2.x, where x matches the python minor version Ansible is running under. 
+ Active Directory Support ++++++++++++++++++++++++ From e1faa787225cc29bbff807c46a8db60579246233 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 29 Feb 2016 17:52:40 -0500 Subject: [PATCH 0766/1113] fixed typo --- hacking/templates/playbooks_directives.rst.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/templates/playbooks_directives.rst.j2 b/hacking/templates/playbooks_directives.rst.j2 index e54d0d455d4..c01a179c347 100644 --- a/hacking/templates/playbooks_directives.rst.j2 +++ b/hacking/templates/playbooks_directives.rst.j2 @@ -1,7 +1,7 @@ Directives Glossary =================== -Here we list the common playbook objects and the their directives. +Here we list the common playbook objects and their directives. Note that not all directives affect the object itself and might just be there to be inherited by other contained objects. Aliases for the directives are not reflected here, nor are mutable ones, for example `action` in task can be substituted by the name of any module plugin. From ac54f66741645315eea37d36bd8350cea2e16840 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Mon, 29 Feb 2016 16:30:55 -0800 Subject: [PATCH 0767/1113] don't lock file when calculating checksum --- lib/ansible/module_utils/powershell.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index c150b077443..fc1d49f4edb 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -211,7 +211,7 @@ Function Get-FileChecksum($path) If (Test-Path -PathType Leaf $path) { $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; - $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite); $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); } From 915037c628ddcdfebd3e266710460376fcfaa91d Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Mon, 29 Feb 2016 16:43:30 -0800 Subject: [PATCH 0768/1113] update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 45367c3d090..0bbb7ba38da 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 45367c3d090ccf4d649b103b50b6eec939b6ee14 +Subproject commit 0bbb7ba38da07d2a9e562834264a2ee2fc9ceaf4 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 479f99678b2..39e4040685b 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 479f99678b267b9c42c9c76504b9c528400eaf70 +Subproject commit 39e4040685bf2c36fd59450ac4f9b40158179f9e From c2ce509aaf4a0fb077b2389dd09e52f945fb6c05 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 26 Feb 2016 23:12:54 -0500 Subject: [PATCH 0769/1113] bugfixes for openswitch shared module This commit fixes two bugs in the openswitch shared module. The first bug was a wrong argument type for the use_ssl argument. It was set to int and should be bool. The second changes the default ports for http (was 80, now 8091) and https (was 443, now 18091). 
This change aligns the default port values with the OS --- lib/ansible/module_utils/openswitch.py | 39 ++++++++++++++------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/openswitch.py b/lib/ansible/module_utils/openswitch.py index ba3eb7b44ab..a2fa7ee827e 100644 --- a/lib/ansible/module_utils/openswitch.py +++ b/lib/ansible/module_utils/openswitch.py @@ -35,7 +35,7 @@ NET_COMMON_ARGS = dict( port=dict(type='int'), username=dict(), password=dict(no_log=True), - use_ssl=dict(default=True, type='int'), + use_ssl=dict(default=True, type='bool'), transport=dict(default='ssh', choices=['ssh', 'cli', 'rest']), provider=dict() ) @@ -48,35 +48,38 @@ def to_list(val): else: return list() -def get_idl(): +def get_runconfig(): manager = OvsdbConnectionManager(settings.get('ovs_remote'), settings.get('ovs_schema')) manager.start() - idl = manager.idl - init_seq_no = 0 - while (init_seq_no == idl.change_seqno): - idl.run() + timeout = 10 + interval = 0 + init_seq_no = manager.idl.change_seqno + + while (init_seq_no == manager.idl.change_seqno): + if interval > timeout: + raise TypeError('timeout') + manager.idl.run() + interval += 1 time.sleep(1) - return idl - -def get_schema(): - return restparser.parseSchema(settings.get('ext_schema')) - -def get_runconfig(): - idl = get_idl() - schema = get_schema() - return runconfig.RunConfigUtil(idl, schema) + schema = restparser.parseSchema(settings.get('ext_schema')) + return runconfig.RunConfigUtil(manager.idl, schema) class Response(object): def __init__(self, resp, hdrs): - self.body = resp.read() + self.body = None self.headers = hdrs + if resp: + self.body = resp.read() + @property def json(self): + if not self.body: + return None try: return json.loads(self.body) except ValueError: @@ -95,11 +98,11 @@ class Rest(object): if self.module.params['use_ssl']: proto = 'https' if not port: - port = 443 + port = 18091 else: proto = 'http' if not port: - port = 80 + port = 8091 self.baseurl = '%s://%s:%s/rest/v1' % (proto, host, port) From 42b2077c936dfdce8120ae8430c031a931908ffb Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Mon, 29 Feb 2016 19:24:56 -0500 Subject: [PATCH 0770/1113] Fix bug 14715: Galaxy CLI paging error --- lib/ansible/galaxy/api.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index eec9ee932e0..3ea51f2891e 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -180,12 +180,12 @@ class GalaxyAPI(object): url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) data = self.__call_galaxy(url) results = data['results'] - done = (data.get('next', None) is None) + done = (data.get('next_link', None) is None) while not done: - url = '%s%s' % (self.baseurl, data['next']) + url = '%s%s' % (self._api_server, data['next_link']) data = self.__call_galaxy(url) results += data['results'] - done = (data.get('next', None) is None) + done = (data.get('next_link', None) is None) return results except: return None @@ -203,12 +203,12 @@ class GalaxyAPI(object): results = data done = True if "next" in data: - done = (data.get('next', None) is None) + done = (data.get('next_link', None) is None) while not done: - url = '%s%s' % (self.baseurl, data['next']) + url = '%s%s' % (self._api_server, data['next_link']) data = self.__call_galaxy(url) results += data['results'] - done = (data.get('next', None) is None) + done = (data.get('next_link', None) is None) return 
results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) From d033c40e8088ee44d29b2ddf3ca228a536147b36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=8F=E6=81=BA=28Xia=20Kai=29?= <xiaket@gmail.com> Date: Tue, 1 Mar 2016 06:03:22 +0000 Subject: [PATCH 0771/1113] =?UTF-8?q?Make=20task=20repr=20really=20work=20?= =?UTF-8?q?for=20meta=20tasks.=20Signed-off-by:=20=E5=A4=8F=E6=81=BA(Xia?= =?UTF-8?q?=20Kai)=20<xiaket@xiaket@gmail.com>?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- lib/ansible/playbook/task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 4328602f593..56f3a15bf8e 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -133,7 +133,7 @@ class Task(Base, Conditional, Taggable, Become): def __repr__(self): ''' returns a human readable representation of the task ''' - if self.get_name() == 'meta ': + if self.get_name() == 'meta': return "TASK: meta (%s)" % self.args['_raw_params'] else: return "TASK: %s" % self.get_name() From 565454966473ed264aba76566f34bbe2d78a8866 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Tue, 1 Mar 2016 08:02:48 +0100 Subject: [PATCH 0772/1113] doc: yumrepo renamed to yum_repository --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3676a04952c..089fef5faa8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ Ansible Changes By Release * aws: ec2_vpc_dhcp_options.py * aws: ec2_vpc_net_facts * cloudstack: cs_volume -* yumrepo +* yum_repository ####New Filters: * extract From 6c641fb6a87e24d4e6aa06181712a979435c1cce Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 18:57:45 +0100 Subject: [PATCH 0773/1113] cloudstack: add CS_HYPERVISORS constant --- lib/ansible/module_utils/cloudstack.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index d9b29fefe7a..c27116c384a 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -35,6 +35,18 @@ try: except ImportError: has_lib_cs = False +CS_HYPERVISORS = [ + "KVM", "kvm", + "VMware", "vmware", + "BareMetal", "baremetal", + "XenServer", "xenserver", + "LXC", "lxc", + "HyperV", "hyperv", + "UCS", "ucs", + "OVM", "ovm", + "Simulator", "simulator", + ] + def cs_argument_spec(): return dict( api_key = dict(default=None), From d6716a2f9054f3426d558576d9e3ebf7937aa888 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 1 Mar 2016 09:16:21 -0500 Subject: [PATCH 0774/1113] noted issues with setup now inheriting environment --- docsite/rst/faq.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 727f4fb1501..2bf2b96ec50 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -15,6 +15,7 @@ Setting environment variables can be done with the `environment` keyword. It can PATH: "{{ ansible_env.PATH }}:/thingy/bin" SOME: value +.. note:: starting in 2.0.1 the setup task from gather_facts also inherits the environment directive from the play, you might need to use the `|default` filter to avoid errors if setting this at play level. How do I handle different machines needing different user accounts or ports to log in with? 
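
A minimal sketch of the workaround hinted at in the FAQ note added above, assuming a play that builds its ``environment`` from ``ansible_env``: wrapping each fact-derived value in the ``|default`` filter keeps the implicit setup task introduced in 2.0.1 from tripping over an undefined variable before facts have been gathered. The host group, paths and the ``proxy_url`` variable here are illustrative only::

    # Hypothetical play; every value taken from facts is guarded with |default
    # so the implicit fact-gathering task can run before ansible_env is defined.
    - hosts: webservers
      environment:
        PATH: "{{ ansible_env.PATH | default('/usr/bin:/bin') }}:/thingy/bin"
        HTTP_PROXY: "{{ proxy_url | default('') }}"
      tasks:
        - name: show the effective PATH seen by tasks
          shell: echo $PATH
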
From c1785f56d5eaa067a6c84a9cada134e2cb2ae627 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 1 Mar 2016 12:43:31 -0500 Subject: [PATCH 0775/1113] added 2 new modules to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 089fef5faa8..6b2b213e6e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,11 @@ Ansible Changes By Release * aws: ec2_vpc_dhcp_options.py * aws: ec2_vpc_net_facts * cloudstack: cs_volume +* win_regmerge +* win_timezone * yum_repository + ####New Filters: * extract From 70e7be0346d69be38e56ef683a859935123a7789 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 1 Mar 2016 12:43:52 -0500 Subject: [PATCH 0776/1113] added clarification on moustaches --- docsite/rst/faq.rst | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 2bf2b96ec50..ab8e5bc201c 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -321,7 +321,30 @@ The no_log attribute can also apply to an entire play:: no_log: True Though this will make the play somewhat difficult to debug. It's recommended that this -be applied to single tasks only, once a playbook is completed. +be applied to single tasks only, once a playbook is completed. + + +.. _when_to_use_brackets: +.. _dynamic_variables: +.. _interpolate_variables: + +When should I use {{ }}? Also, howto interpolate variables or dyanmic variable names +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +A steadfast rule is 'always use {{ }} except when `when:`'. +Conditionals are always run through Jinja2 as to resolve the expression, +so `when:`, `failed_when:` and `changed_when:` are always templated and you should avoid adding `{{}}`. + +In most other cases you should always use the brackets, even if previouslly you could use variables without specifying (like `with_` clauses), +as this made it hard to distinguish between an undefined variable and a string. + +Another rule is 'moustaches don't stack'. We often see this:: + + {{ somvar_{{other_var}} }} + +The above DOES NOT WORK, if you need to use a dynamic variable use the hostvars or vars dictionary as appropriate:: + + {{ hostvars[inventory_hostname]['somevar_' + other_var] }} .. _i_dont_see_my_question: From e011f52557a243478613a50d21bbffa22248a623 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 13:52:50 -0500 Subject: [PATCH 0777/1113] Expanding unit tests for module_utils/basic.py --- lib/ansible/module_utils/basic.py | 11 +- test/units/module_utils/test_basic.py | 368 +++++++++++++++++++++-- test/units/plugins/action/test_action.py | 7 +- 3 files changed, 348 insertions(+), 38 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index df591444d99..e1eefb9c60b 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -185,7 +185,7 @@ except ImportError: pass try: - from ast import literal_eval as _literal_eval + from ast import literal_eval except ImportError: # a replacement for literal_eval that works with python 2.4. from: # https://mail.python.org/pipermail/python-list/2009-September/551880.html @@ -193,7 +193,7 @@ except ImportError: # ast.py from compiler import ast, parse - def _literal_eval(node_or_string): + def literal_eval(node_or_string): """ Safely evaluate an expression node or a string containing a Python expression. 
The string or node provided may only consist of the following @@ -223,6 +223,7 @@ except ImportError: raise ValueError('malformed string') return _convert(node_or_string) +_literal_eval = literal_eval FILE_COMMON_ARGUMENTS=dict( src = dict(), @@ -1254,9 +1255,9 @@ class AnsibleModule(object): try: result = None if not locals: - result = _literal_eval(str) + result = literal_eval(str) else: - result = _literal_eval(str, None, locals) + result = literal_eval(str, None, locals) if include_exceptions: return (result, None) else: @@ -1749,7 +1750,7 @@ class AnsibleModule(object): prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file) except (OSError, IOError): e = get_exception() - self.fail_json(msg='The destination directory (%s) is not writable by the current user.' % dest_dir) + self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (dest_dir, e)) try: # leaves tmp file behind when sudo and not root if switched_user and os.getuid() != 0: diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 86473dd2037..04556736b62 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -21,12 +21,18 @@ from __future__ import (absolute_import, division) __metaclass__ = type import errno +import os import sys -from six.moves import builtins +try: + import builtins +except ImportError: + import __builtin__ as builtins from ansible.compat.tests import unittest -from ansible.compat.tests.mock import patch, MagicMock, mock_open, Mock +from ansible.compat.tests.mock import patch, MagicMock, mock_open, Mock, call + +realimport = builtins.__import__ class TestModuleUtilsBasic(unittest.TestCase): @@ -36,17 +42,106 @@ class TestModuleUtilsBasic(unittest.TestCase): def tearDown(self): pass - def test_module_utils_basic_imports(self): - realimport = builtins.__import__ + def clear_modules(self, mods): + for mod in mods: + if mod in sys.modules: + del sys.modules[mod] + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_syslog(self, mock_import): + def _mock_import(name, *args, **kwargs): + if name == 'syslog': + raise ImportError + return realimport(name, *args, **kwargs) + + self.clear_modules(['syslog', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.HAS_SYSLOG) + + self.clear_modules(['syslog', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.HAS_SYSLOG) + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_selinux(self, mock_import): + def _mock_import(name, *args, **kwargs): + if name == 'selinux': + raise ImportError + return realimport(name, *args, **kwargs) + + try: + self.clear_modules(['selinux', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.HAVE_SELINUX) + except ImportError: + # no selinux on test system, so skip + pass + + self.clear_modules(['selinux', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.HAVE_SELINUX) + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_json(self, mock_import): def _mock_import(name, *args, **kwargs): if name == 'json': - raise ImportError() - realimport(name, 
*args, **kwargs) + raise ImportError + return realimport(name, *args, **kwargs) - with patch.object(builtins, '__import__', _mock_import, create=True) as m: - m('ansible.module_utils.basic') - builtins.__import__('ansible.module_utils.basic') + self.clear_modules(['json', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + + self.clear_modules(['json', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_literal_eval(self, mock_import): + def _mock_import(name, *args, **kwargs): + try: + fromlist = kwargs.get('fromlist', args[2]) + except IndexError: + fromlist = [] + if name == 'ast' and 'literal_eval' in fromlist: + raise ImportError + return realimport(name, *args, **kwargs) + + mock_import.side_effect = _mock_import + del sys.modules['ast'] + del sys.modules['ansible.module_utils.basic'] + mod = builtins.__import__('ansible.module_utils.basic') + self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1") + self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1) + self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1) + self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1,2,3)) + self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1]) + self.assertEqual(mod.module_utils.basic.literal_eval("True"), True) + self.assertEqual(mod.module_utils.basic.literal_eval("False"), False) + self.assertEqual(mod.module_utils.basic.literal_eval("None"), None) + #self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1)) + self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf") + + @patch.object(builtins, '__import__') + def test_module_utils_basic_import_systemd_journal(self, mock_import): + def _mock_import(name, *args, **kwargs): + try: + fromlist = kwargs.get('fromlist', args[2]) + except IndexError: + fromlist = [] + if name == 'systemd' and 'journal' in fromlist: + raise ImportError + return realimport(name, *args, **kwargs) + + self.clear_modules(['systemd', 'ansible.module_utils.basic']) + mod = builtins.__import__('ansible.module_utils.basic') + self.assertTrue(mod.module_utils.basic.has_journal) + + self.clear_modules(['systemd', 'ansible.module_utils.basic']) + mock_import.side_effect = _mock_import + mod = builtins.__import__('ansible.module_utils.basic') + self.assertFalse(mod.module_utils.basic.has_journal) def test_module_utils_basic_get_platform(self): with patch('platform.system', return_value='foo'): @@ -60,19 +155,19 @@ class TestModuleUtilsBasic(unittest.TestCase): self.assertEqual(get_distribution(), None) with patch('platform.system', return_value='Linux'): - with patch('platform.linux_distribution', return_value=("foo", "1", "One")): + with patch('platform.linux_distribution', return_value=["foo"]): self.assertEqual(get_distribution(), "Foo") with patch('os.path.isfile', return_value=True): - def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): - if supported_dists != (): - return ("AmazonFooBar", "", "") - else: - return ("", "", "") - - with patch('platform.linux_distribution', side_effect=_dist): + with patch('platform.linux_distribution', side_effect=[("AmazonFooBar",)]): self.assertEqual(get_distribution(), "Amazonfoobar") + with patch('platform.linux_distribution', side_effect=(("",), ("AmazonFooBam",))): + self.assertEqual(get_distribution(), "Amazon") + + 
with patch('platform.linux_distribution', side_effect=[("",),("",)]): + self.assertEqual(get_distribution(), "OtherLinux") + def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): if supported_dists != (): return ("Bar", "2", "Two") @@ -678,17 +773,230 @@ class TestModuleUtilsBasic(unittest.TestCase): self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True) am.check_mode = False - # FIXME: this isn't working yet - #with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]): - # with patch('os.lchmod', return_value=None) as m_os: - # del m_os.lchmod - # with patch('os.path.islink', return_value=False): - # with patch('os.chmod', return_value=None) as m_chmod: - # self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False), True) - # m_chmod.assert_called_with('/path/to/file', 0o660) - # with patch('os.path.islink', return_value=True): - # with patch('os.chmod', return_value=None) as m_chmod: - # with patch('os.stat', return_value=mock_stat2): - # self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True) - # m_chmod.assert_called_with('/path/to/file', 0o660) + original_hasattr = hasattr + def _hasattr(obj, name): + if obj == os and name == 'lchmod': + return False + return original_hasattr(obj, name) + + # FIXME: this isn't working yet + with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]): + with patch.object(builtins, 'hasattr', side_effect=_hasattr): + with patch('os.path.islink', return_value=False): + with patch('os.chmod', return_value=None) as m_chmod: + self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False), True) + with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]): + with patch.object(builtins, 'hasattr', side_effect=_hasattr): + with patch('os.path.islink', return_value=True): + with patch('os.chmod', return_value=None) as m_chmod: + with patch('os.stat', return_value=mock_stat2): + self.assertEqual(am.set_mode_if_different('/path/to/file', 0o660, False), True) + + @patch('tempfile.NamedTemporaryFile') + @patch('os.umask') + @patch('shutil.copyfileobj') + @patch('shutil.move') + @patch('shutil.copy2') + @patch('os.rename') + @patch('pwd.getpwuid') + @patch('os.getuid') + @patch('os.environ') + @patch('os.getlogin') + @patch('os.chown') + @patch('os.chmod') + @patch('os.stat') + @patch('os.path.exists') + def test_module_utils_basic_ansible_module_atomic_move( + self, + _os_path_exists, + _os_stat, + _os_chmod, + _os_chown, + _os_getlogin, + _os_environ, + _os_getuid, + _pwd_getpwuid, + _os_rename, + _shutil_copy2, + _shutil_move, + _shutil_copyfileobj, + _os_umask, + _tempfile_NamedTemporaryFile, + ): + + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + environ = dict() + _os_environ.__getitem__ = environ.__getitem__ + _os_environ.__setitem__ = environ.__setitem__ + + am.selinux_enabled = MagicMock() + am.selinux_context = MagicMock() + am.selinux_default_context = MagicMock() + am.set_context_if_different = MagicMock() + + # test destination does not exist, no selinux, login name = 'root', + # no environment, os.rename() succeeds + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + am.selinux_enabled.return_value = False + _os_chmod.reset_mock() + 
_os_chown.reset_mock() + am.set_context_if_different.reset_mock() + am.atomic_move('/path/to/src', '/path/to/dest') + _os_rename.assert_called_with('/path/to/src', '/path/to/dest') + self.assertEqual(_os_chmod.call_args_list, [call('/path/to/dest', basic.DEFAULT_PERM & ~18)]) + + # same as above, except selinux_enabled + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + mock_context = MagicMock() + am.selinux_default_context.return_value = mock_context + am.selinux_enabled.return_value = True + _os_chmod.reset_mock() + _os_chown.reset_mock() + am.set_context_if_different.reset_mock() + am.selinux_default_context.reset_mock() + am.atomic_move('/path/to/src', '/path/to/dest') + _os_rename.assert_called_with('/path/to/src', '/path/to/dest') + self.assertEqual(_os_chmod.call_args_list, [call('/path/to/dest', basic.DEFAULT_PERM & ~18)]) + self.assertEqual(am.selinux_default_context.call_args_list, [call('/path/to/dest')]) + self.assertEqual(am.set_context_if_different.call_args_list, [call('/path/to/dest', mock_context, False)]) + + # now with dest present, no selinux, also raise OSError when using + # os.getlogin() to test corner case with no tty + _os_path_exists.side_effect = [True, True] + _os_getlogin.side_effect = OSError() + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + environ['LOGNAME'] = 'root' + stat1 = MagicMock() + stat1.st_mode = 0o0644 + stat1.st_uid = 0 + stat1.st_gid = 0 + _os_stat.side_effect = [stat1,] + am.selinux_enabled.return_value = False + _os_chmod.reset_mock() + _os_chown.reset_mock() + am.set_context_if_different.reset_mock() + am.atomic_move('/path/to/src', '/path/to/dest') + _os_rename.assert_called_with('/path/to/src', '/path/to/dest') + + # dest missing, selinux enabled + _os_path_exists.side_effect = [True, True] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + stat1 = MagicMock() + stat1.st_mode = 0o0644 + stat1.st_uid = 0 + stat1.st_gid = 0 + _os_stat.side_effect = [stat1,] + mock_context = MagicMock() + am.selinux_context.return_value = mock_context + am.selinux_enabled.return_value = True + _os_chmod.reset_mock() + _os_chown.reset_mock() + am.set_context_if_different.reset_mock() + am.selinux_default_context.reset_mock() + am.atomic_move('/path/to/src', '/path/to/dest') + _os_rename.assert_called_with('/path/to/src', '/path/to/dest') + self.assertEqual(am.selinux_context.call_args_list, [call('/path/to/dest')]) + self.assertEqual(am.set_context_if_different.call_args_list, [call('/path/to/dest', mock_context, False)]) + + # now testing with exceptions raised + # have os.stat raise OSError which is not EPERM + _os_stat.side_effect = OSError() + _os_path_exists.side_effect = [True, True] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + self.assertRaises(OSError, am.atomic_move, '/path/to/src', '/path/to/dest') + + # and now have os.stat return EPERM, which should not fail + _os_stat.side_effect = OSError(errno.EPERM, 'testing os stat with EPERM') + 
_os_path_exists.side_effect = [True, True] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_rename.return_value = None + _os_umask.side_effect = [18, 0] + # FIXME: we don't assert anything here yet + am.atomic_move('/path/to/src', '/path/to/dest') + + # now we test os.rename() raising errors... + # first we test with a bad errno to verify it bombs out + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_umask.side_effect = [18, 0] + _os_rename.side_effect = OSError(errno.EIO, 'failing with EIO') + self.assertRaises(SystemExit, am.atomic_move, '/path/to/src', '/path/to/dest') + + # next we test with EPERM so it continues to the alternate code for moving + # test with NamedTemporaryFile raising an error first + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_umask.side_effect = [18, 0] + _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None] + _tempfile_NamedTemporaryFile.return_value = None + _tempfile_NamedTemporaryFile.side_effect = OSError() + am.selinux_enabled.return_value = False + self.assertRaises(SystemExit, am.atomic_move, '/path/to/src', '/path/to/dest') + + # then test with it creating a temp file + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_umask.side_effect = [18, 0] + _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None] + mock_stat1 = MagicMock() + mock_stat2 = MagicMock() + mock_stat3 = MagicMock() + _os_stat.return_value = [mock_stat1, mock_stat2, mock_stat3] + _os_stat.side_effect = None + mock_tempfile = MagicMock() + mock_tempfile.name = '/path/to/tempfile' + _tempfile_NamedTemporaryFile.return_value = mock_tempfile + _tempfile_NamedTemporaryFile.side_effect = None + am.selinux_enabled.return_value = False + # FIXME: we don't assert anything here yet + am.atomic_move('/path/to/src', '/path/to/dest') + + # same as above, but with selinux enabled + _os_path_exists.side_effect = [False, False] + _os_getlogin.return_value = 'root' + _os_getuid.return_value = 0 + _pwd_getpwuid.return_value = ('root', '', 0, 0, '', '', '') + _os_umask.side_effect = [18, 0] + _os_rename.side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None] + mock_tempfile = MagicMock() + _tempfile_NamedTemporaryFile.return_value = mock_tempfile + mock_context = MagicMock() + am.selinux_default_context.return_value = mock_context + am.selinux_enabled.return_value = True + am.atomic_move('/path/to/src', '/path/to/dest') diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 85ac29e4ca6..ea44e315642 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -27,10 +27,11 @@ import pipes import os from sys import version_info -if version_info[0] == 2: - import __builtin__ as builtins -else: + +try: import builtins +except ImportError: + import __builtin__ as builtins from ansible import __version__ as ansible_version from ansible import constants as C From 1de9357c2fc27114611c6c027f13654e66dcfee9 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 14:06:58 -0500 
Subject: [PATCH 0778/1113] Attempt at cleaning up issues introduced by new basic.py unit tests --- test/units/module_utils/test_basic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 04556736b62..3088ba67530 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -88,6 +88,8 @@ class TestModuleUtilsBasic(unittest.TestCase): def _mock_import(name, *args, **kwargs): if name == 'json': raise ImportError + elif name == 'simplejson': + return MagicMock() return realimport(name, *args, **kwargs) self.clear_modules(['json', 'ansible.module_utils.basic']) @@ -109,8 +111,7 @@ class TestModuleUtilsBasic(unittest.TestCase): return realimport(name, *args, **kwargs) mock_import.side_effect = _mock_import - del sys.modules['ast'] - del sys.modules['ansible.module_utils.basic'] + self.clear_modules(['ast', 'ansible.module_utils.basic']) mod = builtins.__import__('ansible.module_utils.basic') self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1") self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1) From 0ea3a9d08d247b833c89aa9581b77a4716b0f5c7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 14:15:15 -0500 Subject: [PATCH 0779/1113] Removing tabs from test_basic.py --- test/units/module_utils/test_basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 3088ba67530..424abd8fcf9 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -117,9 +117,9 @@ class TestModuleUtilsBasic(unittest.TestCase): self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1) self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1) self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1,2,3)) - self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1]) + self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1]) self.assertEqual(mod.module_utils.basic.literal_eval("True"), True) - self.assertEqual(mod.module_utils.basic.literal_eval("False"), False) + self.assertEqual(mod.module_utils.basic.literal_eval("False"), False) self.assertEqual(mod.module_utils.basic.literal_eval("None"), None) #self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1)) self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf") From 18240d350c36a80318ba05cd4db3d86d9ed5d0f8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 14:33:05 -0500 Subject: [PATCH 0780/1113] Adding a skip for the literal_eval test on py3 --- test/units/module_utils/test_basic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 424abd8fcf9..b69dc0294d8 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -100,6 +100,7 @@ class TestModuleUtilsBasic(unittest.TestCase): mod = builtins.__import__('ansible.module_utils.basic') @patch.object(builtins, '__import__') + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_module_utils_basic_import_literal_eval(self, mock_import): def _mock_import(name, *args, **kwargs): try: From 71402abf2162f951000e1ce8e5210f39a602bf6d Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 1 Mar 2016 
15:11:04 -0500 Subject: [PATCH 0781/1113] only do squash when 'squashable field' present --- lib/ansible/executor/task_executor.py | 45 ++++++++++++++------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index b59295b0ed7..a0881cc222b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -279,29 +279,30 @@ class TaskExecutor: # This gets the information to check whether the name field # contains a template that we can squash for template_no_item = template_with_item = None - if templar._contains_vars(name): - variables['item'] = '\0$' - template_no_item = templar.template(name, variables, cache=False) - variables['item'] = '\0@' - template_with_item = templar.template(name, variables, cache=False) - del variables['item'] + if name: + if templar._contains_vars(name): + variables['item'] = '\0$' + template_no_item = templar.template(name, variables, cache=False) + variables['item'] = '\0@' + template_with_item = templar.template(name, variables, cache=False) + del variables['item'] - # Check if the user is doing some operation that doesn't take - # name/pkg or the name/pkg field doesn't have any variables - # and thus the items can't be squashed - if name and (template_no_item != template_with_item): - for item in items: - variables['item'] = item - if self._task.evaluate_conditional(templar, variables): - new_item = templar.template(name, cache=False) - final_items.append(new_item) - self._task.args['name'] = final_items - # Wrap this in a list so that the calling function loop - # executes exactly once - return [final_items] - else: - # Restore the name parameter - self._task.args['name'] = name + # Check if the user is doing some operation that doesn't take + # name/pkg or the name/pkg field doesn't have any variables + # and thus the items can't be squashed + if template_no_item != template_with_item: + for item in items: + variables['item'] = item + if self._task.evaluate_conditional(templar, variables): + new_item = templar.template(name, cache=False) + final_items.append(new_item) + self._task.args['name'] = final_items + # Wrap this in a list so that the calling function loop + # executes exactly once + return [final_items] + else: + # Restore the name parameter + self._task.args['name'] = name #elif: # Right now we only optimize single entries. 
In the future we # could optimize more types: From b853e932d1bad135acf4805fcc290d99b0c09c78 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 16:41:42 -0500 Subject: [PATCH 0782/1113] Adding unit tests for symbolic mode conversion stuff in basic.py --- test/units/module_utils/test_basic.py | 39 +++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index b69dc0294d8..61914e67bc6 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -1002,3 +1002,42 @@ class TestModuleUtilsBasic(unittest.TestCase): am.selinux_enabled.return_value = True am.atomic_move('/path/to/src', '/path/to/dest') + def test_module_utils_basic_ansible_module__symbolic_mode_to_octal(self): + + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + mock_stat = MagicMock() + + # FIXME: trying many more combinations here would be good + # directory, give full perms to all, then one group at a time + mock_stat.st_mode = 0o040000 + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a+rwx'), 0o0777) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx,g+rwx,o+rwx'), 0o0777) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o+rwx'), 0o0007) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g+rwx'), 0o0070) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u+rwx'), 0o0700) + + # same as above, but in reverse so removing permissions + mock_stat.st_mode = 0o040777 + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a-rwx'), 0o0000) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx,g-rwx,o-rwx'), 0o0000) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o-rwx'), 0o0770) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g-rwx'), 0o0707) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u-rwx'), 0o0077) + + # now using absolute assignment + mock_stat.st_mode = 0o040000 + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'a=rwx'), 0o0777) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx,g=rwx,o=rwx'), 0o0777) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'o=rwx'), 0o0007) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'g=rwx'), 0o0070) + self.assertEqual(am._symbolic_mode_to_octal(mock_stat, 'u=rwx'), 0o0700) + + # invalid modes + mock_stat.st_mode = 0o0400000 + self.assertRaises(ValueError, am._symbolic_mode_to_octal, mock_stat, 'a=foo') From b559d0e6ee2e7688e3f3170f25aa35603c9e2b03 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 16:55:01 -0500 Subject: [PATCH 0783/1113] Adding py3 stub for reduce from six.moves --- lib/ansible/module_utils/basic.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index e1eefb9c60b..8edb73a14c5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -113,6 +113,12 @@ else: def iteritems(d): return d.iteritems() +try: + reduce +except NameError: + # Python 3 + from six.moves import reduce + try: NUMBERTYPES = (int, long, float) except NameError: From 9acb5aa176bf71c10975c0a92b8cc225e72d1228 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 1 Mar 2016 17:06:46 -0500 Subject: [PATCH 0784/1113] Changing location of reduce import to not use six.moves --- 
lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8edb73a14c5..8d5963a1f02 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -117,7 +117,7 @@ try: reduce except NameError: # Python 3 - from six.moves import reduce + from functools import reduce try: NUMBERTYPES = (int, long, float) From b2dd22e9f2450bbc83072999ec4b84f62aeaa2b0 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Tue, 1 Mar 2016 10:09:23 -0500 Subject: [PATCH 0785/1113] Updating docker_container_module proposal --- .../docker/docker_container_moduler.md | 53 ++++++++----------- 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/docs/proposals/docker/docker_container_moduler.md b/docs/proposals/docker/docker_container_moduler.md index f77966e9869..e4a2b80590a 100644 --- a/docs/proposals/docker/docker_container_moduler.md +++ b/docs/proposals/docker/docker_container_moduler.md @@ -4,7 +4,7 @@ The purpose of docker_container is to manage the lifecycle of a container. The module will provide a mechanism for moving the container between absent, present, stopped and started states. It will focus purely on managing container -state. The intention of the narrow focus is to make understanding and using the module clear and to keep maintenance +state. The intention of the narrow focus is to make understanding and using the module clear and keep maintenance and testing as easy as possible. Docker_container will manage a container using docker-py to communicate with either a local or remote API. It will @@ -19,7 +19,7 @@ leading the way. If this project is successful, it will naturally deprecate the Docker_container will accept the parameters listed below. An attempt has been made to represent all the options available to docker's create, kill, pause, run, rm, start, stop and update commands. -Parameters related to connecting to the API are not listed here. +Parameters for connecting to the API are not listed here. They are included in the common utility module mentioned above. ``` blkio_weight: @@ -49,7 +49,7 @@ cgroup_parent: command: description: - - Command executed in the container when it starts. + - Command or list of commands to execute in the container when it starts. default: null cpu_period: @@ -113,12 +113,6 @@ dns_servers: - List of custom DNS servers. default: null -dns_opts: - description: - - List of custom DNS options. Each option is written as an options line - into the container's /etc/resolv.conf. - default: null - dns_search_domains: description: - List of custom DNS search domains. @@ -183,9 +177,11 @@ ipv6_address: - Container IPv6 address. default: null -ipc_namespace: +ipc_mode: description: - - Container IPC namespace. + - Set the IPC mode for the container. Can be one of + 'container:<name|id>' to reuse another container's IPC namespace + or 'host' to use the host's IPC namespace within the container. default: null keep_volumes: @@ -227,10 +223,10 @@ log_driver: - splunk defult: json-file -log_opt: +log_options: description: - - Additional options to pass to the logging driver selected above. See Docker `log-driver - <https://docs.docker.com/reference/logging/overview/>` documentation for more information. + - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/ + for details. 
required: false default: null @@ -268,13 +264,14 @@ name: - When identifying an existing container name may be a name or a long or short container ID. required: true -net: +network_mode: description: - Connect the container to a network. choices: - bridge - container:<name|id> - host + - none default: null net_alias: @@ -287,7 +284,7 @@ paused: - Use with the started state to pause running processes inside the container. default: false -pid: +pid_mode: description: - Set the PID namespace mode for the container. Currenly only supports 'host'. default: null @@ -325,16 +322,14 @@ restart_policy: description: - Container restart policy. choices: - - no - on-failure - always - - unless-stopped - default: no + default: on-failure -restart_policy_retry: +restart_retries: description: - - When C(restart_policy) is on-failure sets the max number of retries. - default: 0 + - Use with restart policy to control maximum number of restart attempts. + default: 0 shm_size: description: @@ -419,13 +414,13 @@ volumes: - List of volumes to mount within the container. - 'Use docker CLI-style syntax: C(/host:/container[:mode])' - You can specify a read mode for the mount with either C(ro) or C(rw). - Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z) - mount options to use a shared or private label for the volume. + - SELinux hosts can additionally use C(z) or C(Z) to use a shared or + private label for the volume. default: null volumes_from: description: - - List of container names to mount volumes from. + - List of container names or Ids to get volumes from. default: null ``` @@ -512,14 +507,10 @@ The JSON object returned by the module will include a *results* object providing ``` { changed: True, + failed: False, + rc: 0 results: { < the results of `docker inspect` > } } ``` - -## Contributors - -[chouseknecht](http://twitter.com/chouseknecht) - -*Last Updated:* 2016-02-24 From 804c6f9c09e4d0004f5f69fe734e531313cf4ba8 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 00:00:31 -0500 Subject: [PATCH 0786/1113] Adding docker_volume module proposal --- docs/proposals/docker/docker_files_module.md | 82 ++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 docs/proposals/docker/docker_files_module.md diff --git a/docs/proposals/docker/docker_files_module.md b/docs/proposals/docker/docker_files_module.md new file mode 100644 index 00000000000..4a9d5e47a81 --- /dev/null +++ b/docs/proposals/docker/docker_files_module.md @@ -0,0 +1,82 @@ +# Docker_Volume Modules Proposal + +## Purpose and Scope + +The purpose of docker_volume is to manage volumes. + +Docker_volume will manage volumes using docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar +to how other cloud modules operate. + +## Parameters + +Docker_volume accepts the parameters listed below. Parameters for connecting to the API are not listed here, as they +will be part of the shared module mentioned above. + +``` +driver: + description: + - Volume driver. + default: local + +force: + description: + - Use with state 'present' to force removal and re-creation of an existing volume. This will not remove and + re-create the volume if it is already in use. + +name: + description: + - Name of the volume. + required: true + default: null + +options: + description: + - Dictionary of driver specific options. 
The local driver does not currently support + any options. + default: null + +state: + description: + - "absent" removes a volume. A volume cannot be removed if it is in use. + - "present" create a volume with the specified name, if the volume does not already exist. Use the force + option to remove and re-create a volume. Even with the force option a volume cannot be removed and re-created if + it is in use. + default: present + choices: + - absent + - present +``` + +## Examples + +``` +- name: Create a volume + docker_volume: + name: data + +- name: Remove a volume + docker_volume: + name: data + state: absent + +- name: Re-create an existing volume + docker_volume: + name: data + state: present + force: yes +``` + +## Returns + +``` +{ + changed: true, + failed: false, + rc: 0, + action: removed | created | none + results: { + < show the result of docker inspect of an affected volume > + } +} +``` \ No newline at end of file From 76766aac8b9b1890f6a22deb8a3ba77500b9a245 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Tue, 1 Mar 2016 22:50:30 -0500 Subject: [PATCH 0787/1113] Adding docker_network module proposal. --- .../proposals/docker/docker_network_module.md | 130 ++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 docs/proposals/docker/docker_network_module.md diff --git a/docs/proposals/docker/docker_network_module.md b/docs/proposals/docker/docker_network_module.md new file mode 100644 index 00000000000..4b07e3072dd --- /dev/null +++ b/docs/proposals/docker/docker_network_module.md @@ -0,0 +1,130 @@ +# Docker_Network Module Proposal + +## Purpose and Scope: + +The purpose of Docker_network is to create networks, connect containers to networks, disconnect containers from +networks, and delete networks. + +Docker network will manage networks using docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar to +how other cloud modules operate. + +## Parameters: + +Docker_network will accept the parameters listed below. Parameters related to connecting to the API will be handled in +a shared utility module, as mentioned above. + +``` +containers: + +network_name: + description: + - Name of the network to operate on. + default: null + required: true + +driver: + description: + - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. + default: bridge + +options: + description: + - Dictionary of network settings. Consult docker docs for valid options and values. + default: null + +connected: + description: + - List of container names or container IDs to connect to a network. + default: null + +disconnected: + description: + - List of container names or container IDs to disconnect from a network. + default: null + +disconnect_all: + description: + - Disconnect all containers, unless the containers is in the provided list of connected containers. If no + list of connected containers is provided, all containers will be disconnnected. + default: false + +force: + description: + - With state 'absent' forces disconnecting all containers from the network prior to deleting the network. With + state 'present' will disconnect all containers, delete the network and re-create the network. + default: false + +state: + description: + - "absent" deletes the network. If a network has connected containers, it cannot be deleted. 
Use the force option + to disconnect all containers and delete the network. + - "present" creates the network, if it does not already exist with the specified parameters, and connects the list + of containers provided via the connected parameter. Use disconnected to remove a set of containers from the + network. Use disconnect_all to remove from the network any containers not included in the containers parameter. + If disconnected is provided with no list of connected parameter, all containers will be removed from the + network. Use the force options to force the re-creation of the network. + default: present + choices: + - absent + - present + +``` + + +## Examples: + +``` +- name: Create a network + docker_network: + name: network_one + +- name: Remove all but selected list of containers + docker_network: + name: network_one + connected: + - containera + - containerb + disconnect_all: yes + +- name: Remove a container from the network + docker_network: + name: network_one + disconnected: + - containerb + +- name: Delete a network, disconnected all containers + docker_network: + name: network_one + state: absent + force: yes + +- name: Add a container to a network + docker_network: + name: network_one + connected: + - containerc + +- name: Create a network with options (Not sure if 'ip_range' is correct name) + docker_network + name: network_two + options: + subnet: '172.3.26.0/16' + gateway: 172.3.26.1 + ip_range: '192.168.1.0/24' + +``` + +## Returns: + +``` +{ + changed: True, + failed: false + rc: 0 + action: created | removed | none + results: { + < results from docker inspect for the affected network > + } +} +``` \ No newline at end of file From 3e1332495b12fd076eb38d95cfab650885383014 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Tue, 1 Mar 2016 12:55:22 -0500 Subject: [PATCH 0788/1113] Upated docker_image_module proposal --- docs/proposals/docker/docker_image_module.md | 207 +++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 docs/proposals/docker/docker_image_module.md diff --git a/docs/proposals/docker/docker_image_module.md b/docs/proposals/docker/docker_image_module.md new file mode 100644 index 00000000000..9284ca96692 --- /dev/null +++ b/docs/proposals/docker/docker_image_module.md @@ -0,0 +1,207 @@ + +# Docker_Image Module Proposal + +## Purpose and Scope + +The purpose is to update the existing docker_image module. The updates include expanding the module's capabilities to +match the build, load, pull, push, rmi, and save docker commands and adding support for remote registries. + +Docker_image will manage images using docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar +to how other cloud modules operate. + +## Parameters + +Docker_image will support the parameters listed below. API connection parameters will be part of a shared utility +module as mentioned above. + +``` +archive_path: + description: + - Save image to the provided path. Use with state present to always save the image to a tar archive. If + intermediate directories in the path do not exist, they will be created. If a matching + archive already exists, it will be overwritten. + default: null + +config_path: + description: + - Path to a custom docker config file. Docker-py defaults to using ~/.docker/config.json. + +cgroup_parent: + description: + - Optional parent cgroup for build containers. 
+ default: null + +cpu_shares: + description: + - CPU shares for build containers. Integer value. + default: 0 + +cpuset_cpus: + description: + - CPUs in which to allow build container execution C(1,3) or C(1-3). + default: null + +dockerfile: + description: + - Name of dockerfile to use when building an image. + default: Dockerfile + +email: + description: + - The email for the registry account. Provide with username and password when credentials are not encoded + in docker configuration file or when encoded credentials should be updated. + default: null + nolog: true + +force: + description: + - Use with absent state to un-tag and remove all images matching the specified name. Use with present state to + force a pull or rebuild of the image. + default: false + +load_path: + description: + - Use with state present to load a previously save image. Provide the full path to the image archive file. + default: null + +memory: + description: + - Build container limit. Memory limit specified as a positive integer for number of bytes. + +memswap: + description: + - Build container limit. Total memory (memory + swap). Specify as a positive integer for number of bytes or + -1 to disable swap. + default: null + +name: + description: + - Image name or ID. + required: true + +nocache: + description: + - Do not use cache when building an image. + deafult: false + +password: + description: + - Password used when connecting to the registry. Provide with username and email when credentials are not encoded + in docker configuration file or when encoded credentials should be updated. + default: null + nolog: true + +path: + description: + - Path to Dockerfile and context from which to build an image. + default: null + +push: + description: + - Use with state present to always push an image to the registry. + default: false + +registry: + description: + - URL of the registry. If not provided, defaults to Docker Hub. + default: null + +rm: + description: + - Remove intermediate containers after build. + default: true + +tag: + description: + - Image tags. When pulling or pushing, set to 'all' to include all tags. + default: latest + +url: + description: + - The location of a Git repository. The repository acts as the context when building an image. + - Mutually exclusive with path. + +username: + description: + - Username used when connecting to the registry. Provide with password and email when credentials are not encoded + in docker configuration file or when encoded credentials should be updated. + default: null + nolog: true + +state: + description: + - "absent" - if image exists, unconditionally remove it. Use the force option to un-tag and remove all images + matching the provided name. + - "present" - check if image is present with the provided tag. If the image is not present or the force option + is used, the image will either be pulled from the registry, built or loaded from an archive. To build the image, + provide a path or url to the context and Dockerfile. To load an image, use load_path to provide a path to + an archive file. If no path, url or load_path is provided, the image will be pulled. Use the registry + parameters to control the registry from which the image is pulled. + +required: false +default: present +choices: + - absent + - present + +http_timeout: + description: + - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of + seconds. 
+ default: null + +``` + + +## Examples + +``` +- name: build image + docker_image: + path: "/path/to/build/dir" + name: "my_app" + tags: + - v1.0 + - mybuild + +- name: force pull an image and all tags + docker_image: + name: "my/app" + force: yes + tags: all + +- name: untag and remove image + docker_image: + name: "my/app" + state: absent + force: yes + +- name: push an image to Docker Hub with all tags + docker_image: + name: my_image + push: yes + tags: all + +- name: pull image from a private registry + docker_image: + name: centos + registry: https://private_registry:8080 + +``` + + +## Returns + +``` +{ + changed: True + failed: False + rc: 0 + action: built | pulled | loaded | removed | none + msg: < text confirming the action that was taken > + results: { + < output from docker inspect for the affected image > + } +} +``` \ No newline at end of file From d0137abf9848f4653bd4b52123a59d163cf25ef4 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 02:26:45 -0500 Subject: [PATCH 0789/1113] Adding docker_volume_facts module proposal --- docs/proposals/docker/docker_volume_facts.md | 48 ++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docs/proposals/docker/docker_volume_facts.md diff --git a/docs/proposals/docker/docker_volume_facts.md b/docs/proposals/docker/docker_volume_facts.md new file mode 100644 index 00000000000..119df27e337 --- /dev/null +++ b/docs/proposals/docker/docker_volume_facts.md @@ -0,0 +1,48 @@ + +# Docker_Volume_Facts Module Proposal + +## Purpose and Scope + +Docker_volume_facts will inspect volumes. + +Docker_volume_facts will use docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar +to how other cloud modules operate. + +## Parameters + +Docker_volume_facts will accept the parameters listed below. API connection parameters will be part of a shared +utility module as mentioned above. + + +``` +name: + description: + - Volume name or list of volume names. + default: null +``` + + +## Examples + +``` +- name: Inspect all volumes + docker_volume_facts + register: volume_facts + +- name: Inspect a specific volume + docker_volume_facts: + name: data + register: data_vol_facts +``` + +# Returns + +``` +{ + changed: False + failed: False + rc: 0 + results: [ < output from volume inspection > ] +} +``` \ No newline at end of file From f10807eb4286f2d92d3100e59bfa4b27f89fcd32 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 01:54:34 -0500 Subject: [PATCH 0790/1113] Adding docker_network_facts module proposal. --- docs/proposals/docker/docker_network_facts.md | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docs/proposals/docker/docker_network_facts.md diff --git a/docs/proposals/docker/docker_network_facts.md b/docs/proposals/docker/docker_network_facts.md new file mode 100644 index 00000000000..84576049ba1 --- /dev/null +++ b/docs/proposals/docker/docker_network_facts.md @@ -0,0 +1,48 @@ + +# Docker_Network_Facts Module Proposal + +## Purpose and Scope + +Docker_network_facts will inspect networks. + +Docker_network_facts will use docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar +to how other cloud modules operate. 
+ +## Parameters + +Docker_network_facts will accept the parameters listed below. API connection parameters will be part of a shared +utility module as mentioned above. + +``` +name: + description: + - Network name or list of network names. + default: null + +``` + + +## Examples + +``` +- name: Inspect all networks + docker_network_facts + register: network_facts + +- name: Inspect a specific network and format the output + docker_network_facts + name: web_app + register: web_app_facts +``` + +# Returns + +``` +{ + changed: False + failed: False + rc: 0 + results: [ < inspection output > ] +} +``` From e0f41c7726355c9282b840327a3848f677b384ca Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Tue, 1 Mar 2016 13:42:39 -0500 Subject: [PATCH 0791/1113] Updated docker_image_facts module proposal. --- .../docker/docker_image_facts_module.md | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 docs/proposals/docker/docker_image_facts_module.md diff --git a/docs/proposals/docker/docker_image_facts_module.md b/docs/proposals/docker/docker_image_facts_module.md new file mode 100644 index 00000000000..a399e682b01 --- /dev/null +++ b/docs/proposals/docker/docker_image_facts_module.md @@ -0,0 +1,47 @@ + +# Docker_Image_Facts Module Proposal + +## Purpose and Scope + +The purpose of docker_image_facts is to inspect docker images. + +Docker_image_facts will use docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar +to how other cloud modules operate. + +## Parameters + +Docker_image_facts will support the parameters listed below. API connection parameters will be part of a shared +utility module as mentioned above. + +``` +name: + description: + - An image name or list of image names. The image name can include a tag using the format C(name:tag). + default: null +``` + +## Examples + +``` +- name: Inspect all images + docker_image_facts + register: image_facts + +- name: Inspect a single image + docker_image_facts: + name: myimage:v1 + register: myimage_v1_facts +``` + +## Returns + +``` +{ + changed: False + failed: False + rc: 0 + result: [ < inspection output > ] +} +``` + From 78f6b619904ae6c87426ce0ae5a1d07e74037cf4 Mon Sep 17 00:00:00 2001 From: Jinesh Choksi <jinesh-choksi@users.noreply.github.com> Date: Wed, 2 Mar 2016 10:54:37 +0000 Subject: [PATCH 0792/1113] On RHEL/CentOS 6 needed additional dependencies to build RPM To build an RPM on RHEL 6, we needed asciidoc, git and python-setuptools in addition to the rpm-build/make and python2-devel packages. --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index d132752f36a..0859c4ce2a7 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -197,7 +197,7 @@ Fedora users can install Ansible directly, though if you are using RHEL or CentO # install the epel-release RPM if needed on CentOS, RHEL, or Scientific Linux $ sudo yum install ansible -You can also build an RPM yourself. From the root of a checkout or tarball, use the ``make rpm`` command to build an RPM you can distribute and install. Make sure you have ``rpm-build``, ``make``, and ``python2-devel`` installed. +You can also build an RPM yourself. 
From the root of a checkout or tarball, use the ``make rpm`` command to build an RPM you can distribute and install. Make sure you have ``rpm-build``, ``make``, ``asciidoc``, ``git``, ``python-setuptools`` and ``python2-devel`` installed. .. code-block:: bash From 26005dfa5beef98948fbb66ce90c68b89f17247a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Wed, 2 Mar 2016 11:09:47 +0100 Subject: [PATCH 0793/1113] proposal: deprecate always_run in favor of checkmode_run --- docs/proposals/rename_always_run.md | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 docs/proposals/rename_always_run.md diff --git a/docs/proposals/rename_always_run.md b/docs/proposals/rename_always_run.md new file mode 100644 index 00000000000..6adcd508f49 --- /dev/null +++ b/docs/proposals/rename_always_run.md @@ -0,0 +1,34 @@ +# Rename always_run to checkmode_run + +*Author*: René Moser <@resmo> + +*Date*: 02/03/2016 + +## Motivation + +The task argument `always_run` is misleading. + +Ansible is known to be readable by users without deep knowledge of creating playbooks, they do not understand +what `always_run` does at the first glance. + +### Problems + +The following looks scary if you have no idea, what `always_run` does: + +``` +- shell: dangerous_cleanup.sh + when: cleanup == "yes" + always_run: yes +``` + +You have a conditional but also a word that says `always`. This is a conflict in terms of understanding. + +## Solution Proposal + +Deprecate `always_run` by rename it to `checkmode_run`: + +``` +- shell: dangerous_cleanup.sh + when: cleanup == "yes" + checkmode_run: yes +``` From 2c7a33f71d32a0c2ea64669ad5d0151e0f4fa6d0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 2 Mar 2016 08:05:43 -0500 Subject: [PATCH 0794/1113] Fixing bug in setup related to StringIO fixes --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 974d47a55d9..f5558e86505 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -257,7 +257,7 @@ class Facts(object): # load raw ini cp = ConfigParser.ConfigParser() try: - cp.readfp(StringIO.StringIO(out)) + cp.readfp(StringIO(out)) except ConfigParser.Error: fact = "error loading fact - please check content" else: From 7404d4ce7fb9bc4362a4c560406e4685c04cfcd6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 2 Mar 2016 09:19:02 -0500 Subject: [PATCH 0795/1113] made dict example less ambigous fixes #14748 --- docsite/rst/YAMLSyntax.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 8189a6caf6c..97698d68964 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -37,7 +37,7 @@ All members of a list are lines beginning at the same indentation level starting A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space):: # An employee record - - martin: + martin: name: Martin D'vloper job: Developer skill: Elite @@ -45,9 +45,8 @@ A dictionary is represented in a simple ``key: value`` form (the colon must be f Dictionaries and lists can also be represented in an abbreviated form if you really want to:: --- - employees: - - martin: {name: Martin D'vloper, job: Developer, skill: Elite} - fruits: ['Apple', 'Orange', 'Strawberry', 'Mango'] + martin: {name: Martin D'vloper, 
job: Developer, skill: Elite} + fruits: ['Apple', 'Orange', 'Strawberry', 'Mango] .. _truthiness: From 32473901234a4641094562a9d0629ff353d22966 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 2 Mar 2016 09:34:07 -0500 Subject: [PATCH 0796/1113] Conditionally import StringIO in template so we only use io.StringIO on py3 --- lib/ansible/template/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index a7a8ac4a37c..e780c245cdd 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -23,7 +23,11 @@ import ast import contextlib import os import re -from io import StringIO + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO from ansible.compat.six import string_types, text_type, binary_type from jinja2 import Environment From 5aef65edcdf98abfc78d9fe50b927c97bc4ade4d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 2 Mar 2016 07:47:10 -0800 Subject: [PATCH 0797/1113] Testing whether this fixes jenkins --- lib/ansible/template/__init__.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index e780c245cdd..32f2144e0bc 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -24,10 +24,7 @@ import contextlib import os import re -try: - from StringIO import StringIO -except ImportError: - from io import StringIO +from io import StringIO from ansible.compat.six import string_types, text_type, binary_type from jinja2 import Environment @@ -293,10 +290,17 @@ class Templar: # Don't template unsafe variables, instead drop them back down to their constituent type. if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): - return self._clean_data(text_type(variable)) + return self._clean_data(variable) elif isinstance(variable, binary_type): - return self._clean_data(bytes(variable)) + # If we're unicode sandwiching, then we shouldn't get here but + # seems like we are. Will have to decide whether to turn them + # into text_type instead + raise AnsibleError("variable is str: %s" % variable) + #elif isinstance(variable, binary_type): + # return self._clean_data(bytes(variable)) else: + # Do we need to convert these into text_type as well? + # return self._clean_data(to_unicode(variable._obj, nonstring='passthru')) return self._clean_data(variable._obj) try: From d85589121efd331ee4b17b2d0d3ee9d9469be9ce Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 11:22:34 -0500 Subject: [PATCH 0798/1113] Updating docker_network module proposal. --- .../proposals/docker/docker_network_module.md | 86 ++++++++----------- 1 file changed, 38 insertions(+), 48 deletions(-) diff --git a/docs/proposals/docker/docker_network_module.md b/docs/proposals/docker/docker_network_module.md index 4b07e3072dd..bf86e672e2b 100644 --- a/docs/proposals/docker/docker_network_module.md +++ b/docs/proposals/docker/docker_network_module.md @@ -15,7 +15,27 @@ Docker_network will accept the parameters listed below. Parameters related to co a shared utility module, as mentioned above. ``` -containers: +connected: + description: + - List of container names or container IDs to connect to a network. + default: null + +driver: + description: + - Specify the type of network. 
Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. + default: bridge + +force: + description: + - With state 'absent' forces disconnecting all containers from the network prior to deleting the network. With + state 'present' will disconnect all containers, delete the network and re-create the network. + default: false + +incremental: + description: + - By default the connected list is canonical, meaning containers not on the list are removed from the network. + Use incremental to leave existing containers connected. + default: false network_name: description: @@ -23,52 +43,23 @@ network_name: default: null required: true -driver: - description: - - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. - default: bridge - options: description: - Dictionary of network settings. Consult docker docs for valid options and values. default: null - -connected: - description: - - List of container names or container IDs to connect to a network. - default: null - -disconnected: - description: - - List of container names or container IDs to disconnect from a network. - default: null - -disconnect_all: - description: - - Disconnect all containers, unless the containers is in the provided list of connected containers. If no - list of connected containers is provided, all containers will be disconnnected. - default: false - -force: - description: - - With state 'absent' forces disconnecting all containers from the network prior to deleting the network. With - state 'present' will disconnect all containers, delete the network and re-create the network. - default: false state: description: - "absent" deletes the network. If a network has connected containers, it cannot be deleted. Use the force option to disconnect all containers and delete the network. - "present" creates the network, if it does not already exist with the specified parameters, and connects the list - of containers provided via the connected parameter. Use disconnected to remove a set of containers from the - network. Use disconnect_all to remove from the network any containers not included in the containers parameter. - If disconnected is provided with no list of connected parameter, all containers will be removed from the - network. Use the force options to force the re-creation of the network. + of containers provided via the connected parameter. Containers not on the list will be disconnected. An empty + list will leave no containers connected to the network. Use the incremental option to leave existing containers + connected. Use the force options to force re-creation of the network. 
default: present choices: - absent - present - ``` @@ -85,34 +76,33 @@ state: connected: - containera - containerb - disconnect_all: yes + - containerc -- name: Remove a container from the network +- name: Remove a single container docker_network: name: network_one - disconnected: - - containerb - -- name: Delete a network, disconnected all containers - docker_network: - name: network_one - state: absent - force: yes - -- name: Add a container to a network + connected: "{{ fulllist|difference(['containera']) }}" + +- name: Add a container to a network, leaving existing containers connected docker_network: name: network_one connected: - containerc - -- name: Create a network with options (Not sure if 'ip_range' is correct name) + incremental: yes + +- name: Create a network with options (Not sure if 'ip_range' is correct key name) docker_network name: network_two options: subnet: '172.3.26.0/16' gateway: 172.3.26.1 ip_range: '192.168.1.0/24' - + +- name: Delete a network, disconnecting all containers + docker_network: + name: network_one + state: absent + force: yes ``` ## Returns: From 7ba790bbaf5059fb5a858227b7b1e5f852a1a288 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 2 Mar 2016 11:54:41 -0500 Subject: [PATCH 0799/1113] don't tempalte register --- lib/ansible/playbook/task.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 56f3a15bf8e..556aea5043a 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -229,6 +229,13 @@ class Task(Base, Conditional, Taggable, Become): super(Task, self).post_validate(templar) + def _post_validate_register(self, attr, value, templar): + ''' + Override post validation for the register args field, which is not + supposed to be templated + ''' + return value + def _post_validate_loop_args(self, attr, value, templar): ''' Override post validation for the loop args field, which is templated From 093b3422ec95a5c93d281e334eab7d81e5eb49e3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 2 Mar 2016 08:58:15 -0800 Subject: [PATCH 0800/1113] more information --- lib/ansible/template/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 32f2144e0bc..5e654e9e88f 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -504,7 +504,8 @@ class Templar: ) else: debug("failing because of a type error, template data is: %s" % data) - raise AnsibleError("an unexpected type error occurred. Error was %s" % te) + import traceback + raise AnsibleError("an unexpected type error occurred. 
Error was %s, tracback: %s" % (te, traceback.format_exc())) if preserve_trailing_newlines: # The low level calls above do not preserve the newline From ca0797fc4eb1f143e25ec214aac4168017cac292 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 2 Mar 2016 11:20:28 -0500 Subject: [PATCH 0801/1113] avoid issues when stdin is a closed file this seems to happen when nohup is involved, so the check tty does not get a chance to fail, it just works with pipes fixes http://github.com/ansible/ansible-modules-core/issues/3166 --- lib/ansible/plugins/action/pause.py | 31 ++++++++++++++++++----------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py index 97fa9ac3207..afe4f1d34bb 100644 --- a/lib/ansible/plugins/action/pause.py +++ b/lib/ansible/plugins/action/pause.py @@ -120,23 +120,29 @@ class ActionModule(ActionBase): # save the attributes on the existing (duped) stdin so # that we can restore them later after we set raw mode - fd = self._connection._new_stdin.fileno() - if isatty(fd): - old_settings = termios.tcgetattr(fd) - tty.setraw(fd) - - # flush the buffer to make sure no previous key presses - # are read in below - termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH) + fd = None + try: + fd = self._connection._new_stdin.fileno() + except ValueError: + # someone is using a closed file descriptor as stdin + pass + if fd is not None: + if isatty(fd): + old_settings = termios.tcgetattr(fd) + tty.setraw(fd) + # flush the buffer to make sure no previous key presses + # are read in below + termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH) while True: try: - key_pressed = self._connection._new_stdin.read(1) - if key_pressed == '\x03': - raise KeyboardInterrupt + if fd is not None: + key_pressed = self._connection._new_stdin.read(1) + if key_pressed == '\x03': + raise KeyboardInterrupt if not seconds: - if not isatty(fd): + if fd is None or not isatty(fd): display.warning("Not waiting from prompt as stdin is not interactive") break # read key presses and act accordingly @@ -154,6 +160,7 @@ class ActionModule(ActionBase): else: raise AnsibleError('user requested abort!') + except AnsibleTimeoutExceeded: # this is the exception we expect when the alarm signal # fires, so we simply ignore it to move into the cleanup From 1dbbd2a80cbb7331c53fdf1c9fd0c6e8817a072f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 2 Mar 2016 09:31:06 -0800 Subject: [PATCH 0802/1113] Remove debugging and fix the place where bytes are being handed to StringIO --- lib/ansible/template/__init__.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 5e654e9e88f..6a07566fa0e 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -40,6 +40,7 @@ from ansible.template.safe_eval import safe_eval from ansible.template.template import AnsibleJ2Template from ansible.template.vars import AnsibleJ2Vars from ansible.utils.debug import debug +from ansible.utils.unicode import to_unicode try: from hashlib import sha1 @@ -255,10 +256,10 @@ class Templar: if prev_idx is not None: # replace the opening data.seek(prev_idx, os.SEEK_SET) - data.write(self.environment.comment_start_string) + data.write(to_unicode(self.environment.comment_start_string)) # replace the closing data.seek(token_start, os.SEEK_SET) - 
data.write(self.environment.comment_end_string) + data.write(to_unicode(self.environment.comment_end_string)) else: raise AnsibleError("Error while cleaning data for safety: unhandled regex match") @@ -291,13 +292,6 @@ class Templar: if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(variable) - elif isinstance(variable, binary_type): - # If we're unicode sandwiching, then we shouldn't get here but - # seems like we are. Will have to decide whether to turn them - # into text_type instead - raise AnsibleError("variable is str: %s" % variable) - #elif isinstance(variable, binary_type): - # return self._clean_data(bytes(variable)) else: # Do we need to convert these into text_type as well? # return self._clean_data(to_unicode(variable._obj, nonstring='passthru')) @@ -504,8 +498,7 @@ class Templar: ) else: debug("failing because of a type error, template data is: %s" % data) - import traceback - raise AnsibleError("an unexpected type error occurred. Error was %s, tracback: %s" % (te, traceback.format_exc())) + raise AnsibleError("an unexpected type error occurred. Error was %s" % te) if preserve_trailing_newlines: # The low level calls above do not preserve the newline From 485bd4acc39f9c47eb4ceabfd4a14d3dc7c62043 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 13:00:04 -0500 Subject: [PATCH 0803/1113] Fix docker_volume module proposal file name. --- .../docker/{docker_files_module.md => docker_volume_module.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/proposals/docker/{docker_files_module.md => docker_volume_module.md} (100%) diff --git a/docs/proposals/docker/docker_files_module.md b/docs/proposals/docker/docker_volume_module.md similarity index 100% rename from docs/proposals/docker/docker_files_module.md rename to docs/proposals/docker/docker_volume_module.md From 64f445f3a16905a0b3879c9a8080a4c585c46334 Mon Sep 17 00:00:00 2001 From: yyoshiki41 <yyoshiki41@gmail.com> Date: Thu, 3 Mar 2016 03:19:46 +0900 Subject: [PATCH 0804/1113] Fixed docs typo --- docs/man/man1/ansible-playbook.1.asciidoc.in | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 289e7917ddd..5a6ec659ff6 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -125,10 +125,6 @@ environment variable. Use this file to authenticate the connection -*--skip-tages=*'SKIP_TAGS':: - -Only run plays and tasks whose tags do not match these values. - *--start-at-task=*'START_AT':: Start the playbook at the task matching this name. 
From 6edf1443db41ad0ebbf63be57111494b8eac5fbd Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Wed, 2 Mar 2016 13:31:15 -0500 Subject: [PATCH 0805/1113] Adding docker_files module proposal --- docs/proposals/docker/docker_files_module.md | 159 +++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 docs/proposals/docker/docker_files_module.md diff --git a/docs/proposals/docker/docker_files_module.md b/docs/proposals/docker/docker_files_module.md new file mode 100644 index 00000000000..3970584f0d3 --- /dev/null +++ b/docs/proposals/docker/docker_files_module.md @@ -0,0 +1,159 @@ +# Docker_Files Modules Proposal + +## Purpose and Scope + +The purpose of docker_files is to provide for retrieving a file or folder from a container's file system, +inserting a file or folder into a container, exporting a container's entire filesystem as a tar archive, or +retrieving a list of changed files from a container's file system. + +Docker_files will manage a container using docker-py to communicate with either a local or remote API. It will +support API versions >= 1.14. API connection details will be handled externally in a shared utility module similar to +how other cloud modules operate. + +## Parameters + +Docker_files accepts the parameters listed below. API connection parameters will be part of a shared utility module +as mentioned above. + +``` +diff: + description: + - Provide a list of container names or IDs. For each container a list of changed files and directories found on the + container's file system will be returned. Diff is mutually exclusive from all other options except event_type. + Use event_type to choose which events to include in the output. + default: null + +export: + description: + - Provide a container name or ID. The container's file system will be exported to a tar archive. Use dest + to provide a path for the archive on the local file system. If the output file already exists, it will not be + overwritten. Use the force option to overwrite an existing archive. + default: null + +dest: + description: + - Destination path of copied files. If the destination is a container file system, precede the path with a + container name or ID + ':'. For example, C(mycontainer:/path/to/file.txt). If the destination path does not + exist, it will be created. If the destination path exists on a the local filesystem, it will not be overwritten. + Use the force option to overwrite existing files on the local filesystem. + default: null + +force: + description: + - Overwrite existing files on the local filesystem. + default: false + +follow_link: + description: + - Follow symbolic links in the src path. If src is local and file is a symbolic link, the symbolic link, not the + target is copied by default. To copy the link target and not the link, set follow_link to true. + default: false + +event_type: + description: + - Select the specific event type to list in the diff output. + choices: + - all + - add + - delete + - change + default: all + +src: + description: + - The source path of file(s) to be copied. If source files are found on the container's file system, precede the + path with the container name or ID + ':'. For example, C(mycontainer:/path/to/files). 
+ default: null + +``` + +## Examples + +``` +- name: Copy files from the local file system to a container's file system + docker_files: + src: /tmp/rpm + dest: mycontainer:/tmp + follow_links: yes + +- name: Copy files from the container to the local filesystem and overwrite existing files + docker_files: + src: container1:/var/lib/data + dest: /tmp/container1/data + force: yes + +- name: Export container filesystem + docker_file: + export: container1 + dest: /tmp/conainer1.tar + force: yes + +- name: List all differences for multiple containers. + docker_files: + diff: + - mycontainer1 + - mycontainer2 + +- name: Included changed files only in diff output + docker_files: + diff: + - mycontainer1 + event_type: change +``` + +## Returns + +Returned from diff: + +``` +{ + changed: false, + failed: false, + rc: 0, + results: { + mycontainer1: [ + { state: 'C', path: '/dev' }, + { state: 'A', path: '/dev/kmsg' }, + { state: 'C', path: '/etc' }, + { state: 'A', path: '/etc/mtab' } + ], + mycontainer2: [ + { state: 'C', path: '/foo' }, + { state: 'A', path: '/foo/bar.txt' } + ] + } +} +``` + +Returned when copying files: + +``` +{ + changed: true, + failed: false, + rc: 0, + results: { + src: /tmp/rpms, + dest: mycontainer:/tmp + files_copied: [ + 'file1.txt', + 'file2.jpg' + ] + } +} +``` + +Return when exporting container filesystem: + +``` +{ + changed: true, + failed: false, + rc: 0, + results: { + src: container_name, + dest: local/path/archive_name.tar + } +} + +``` From 5975296770681de396838beeaa3dc1bfde841fb0 Mon Sep 17 00:00:00 2001 From: Joe <joe@enbrite.ly> Date: Wed, 2 Mar 2016 19:44:25 +0100 Subject: [PATCH 0806/1113] Fix AWS EC2 external inventory script config read EC2 inventory scripts reads configuration from an INI file. The `instance_filters` option controls which EC2 instances are retrieved for inventory. Filling this option and running the inventory script with Python 3 crashes with the following error: ```python Traceback (most recent call last): File "./contrib/inventory/ec2.py", line 1328, in <module> Ec2Inventory() File "./contrib/inventory/ec2.py", line 163, in __init__ self.read_settings() File "./contrib/inventory/ec2.py", line 393, in read_settings for instance_filter in config.get('ec2', 'instance_filters', '').split(','): TypeError: get() takes 3 positional arguments but 4 were given ``` The problem is the last parameter of config.get() call, because `fallback` keyword argument is not specified. The fix handles epmpty `instance_filers` in case of Python 2&3 --- contrib/inventory/ec2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 4c5cf23fcb8..f9a943efbe0 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -388,7 +388,10 @@ class Ec2Inventory(object): # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): - for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + + filters = [tag for tag in config.get('ec2', 'instance_filters').split(',') if tag] + + for instance_filter in filters: instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue From 21ba6e9ce83410d35adbc4c3b5b12a07a6e28271 Mon Sep 17 00:00:00 2001 From: Joe <joe@enbrite.ly> Date: Wed, 2 Mar 2016 20:06:14 +0100 Subject: [PATCH 0807/1113] Do not use name `tag` for intance filter --- contrib/inventory/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index f9a943efbe0..86327360b90 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -389,7 +389,7 @@ class Ec2Inventory(object): self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): - filters = [tag for tag in config.get('ec2', 'instance_filters').split(',') if tag] + filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] for instance_filter in filters: instance_filter = instance_filter.strip() From c24249c57d0ee166f064d6d1198819f87ed7225e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 10 Feb 2016 11:23:49 -0500 Subject: [PATCH 0808/1113] made max diff size configurable --- examples/ansible.cfg | 4 ++++ lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 5 +++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index c9dc7592a40..91ef70b77a5 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -198,6 +198,10 @@ # is used. This value must be an integer from 0 to 9. #var_compression_level = 9 +# This controls the cutoff point (in bytes) on --diff for files +# set to 0 for unlimited (RAM may suffer!). 
+#max_diff_size = 1048576 + [privilege_escalation] #become=True #become_method=sudo diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index bbb32c9cc35..6623b8f0c34 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -239,6 +239,7 @@ RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True) DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True) DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, boolean=True) +MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024*1024, integer=True) # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s') @@ -298,6 +299,5 @@ DEFAULT_SUBSET = None DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 -MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024 TREE_DIR = None LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1']) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 28eb6ffe6e7..093ddd058e5 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -615,7 +615,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): diff['before'] = '' elif peek_result['appears_binary']: diff['dst_binary'] = 1 - elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF: + elif C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF: diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF else: display.debug("Slurping the file %s" % source) @@ -631,7 +631,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): if source_file: st = os.stat(source) - if st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF: + if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF: diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF else: display.debug("Reading local copy of the file %s" % source) @@ -640,6 +640,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): src_contents = src.read() except Exception as e: raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e))) + if "\x00" in src_contents: diff['src_binary'] = 1 else: From 6ff0b079b4688df943af2da71c7533a0bf9ba0cb Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Wed, 2 Mar 2016 16:30:16 -0600 Subject: [PATCH 0809/1113] Support SSL validation with redirect control for python versions without ssl context --- lib/ansible/module_utils/urls.py | 50 ++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index a930483463b..0d0965e3abe 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -427,7 +427,7 @@ class RequestWithMethod(urllib2.Request): return urllib2.Request.get_method(self) -def RedirectHandlerFactory(follow_redirects=None): +def RedirectHandlerFactory(follow_redirects=None, validate_certs=True): """This is a class factory that closes over the value of ``follow_redirects`` so that the RedirectHandler class has access to that value without having to use globals, and potentially cause problems @@ -442,17 +442,17 @@ def RedirectHandlerFactory(follow_redirects=None): """ def 
redirect_request(self, req, fp, code, msg, hdrs, newurl): - if follow_redirects == 'urllib2': - return urllib2.HTTPRedirectHandler.redirect_request(self, req, - fp, code, - msg, hdrs, - newurl) + handler = maybe_add_ssl_handler(newurl, validate_certs) + if handler: + urllib2._opener.add_handler(handler) - if follow_redirects in [None, 'no', 'none']: + if follow_redirects == 'urllib2': + return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl) + elif follow_redirects in ['no', 'none', False]: raise urllib2.HTTPError(newurl, code, msg, hdrs, fp) do_redirect = False - if follow_redirects in ['all', 'yes']: + if follow_redirects in ['all', 'yes', True]: do_redirect = (code >= 300 and code < 400) elif follow_redirects == 'safe': @@ -650,15 +650,7 @@ class SSLValidationHandler(urllib2.BaseHandler): https_request = http_request -# Rewrite of fetch_url to not require the module environment -def open_url(url, data=None, headers=None, method=None, use_proxy=True, - force=False, last_mod_time=None, timeout=10, validate_certs=True, - url_username=None, url_password=None, http_agent=None, - force_basic_auth=False, follow_redirects='urllib2'): - ''' - Fetches a file from an HTTP/FTP server using urllib2 - ''' - handlers = [] +def maybe_add_ssl_handler(url, validate_certs): # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) @@ -678,9 +670,24 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, port = 443 # create the SSL validation handler and # add it to the list of handlers - ssl_handler = SSLValidationHandler(hostname, port) + return SSLValidationHandler(hostname, port) + +# Rewrite of fetch_url to not require the module environment +def open_url(url, data=None, headers=None, method=None, use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, + force_basic_auth=False, follow_redirects=False): + ''' + Fetches a file from an HTTP/FTP server using urllib2 + ''' + handlers = [] + ssl_handler = maybe_add_ssl_handler(url, validate_certs) + if ssl_handler: handlers.append(ssl_handler) + # FIXME: change the following to use the generic_urlparse function + # to remove the indexed references for 'parsed' + parsed = urlparse.urlparse(url) if parsed[0] != 'ftp': username = url_username @@ -731,8 +738,7 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, if hasattr(socket, 'create_connection') and CustomHTTPSHandler: handlers.append(CustomHTTPSHandler) - if follow_redirects != 'urllib2': - handlers.append(RedirectHandlerFactory(follow_redirects)) + handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs)) opener = urllib2.build_opener(*handlers) urllib2.install_opener(opener) @@ -821,7 +827,9 @@ def fetch_url(module, url, data=None, headers=None, method=None, password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', None) force_basic_auth = module.params.get('force_basic_auth', '') - follow_redirects = follow_redirects or module.params.get('follow_redirects', 'urllib2') + + if not follow_redirects: + follow_redirects = module.params.get('follow_redirects', False) r = None info = dict(url=url) From 18d58e42d51bb84ca6d28d2e521f33859bcabe8c Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 3 Mar 2016 14:32:56 +0100 Subject: [PATCH 0810/1113] Improve ambiguous statement regarding handler 
order This fixes #10064 --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index e43dfd475a7..8ae7e82b4a4 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -382,7 +382,7 @@ Handlers are best used to restart services and trigger reboots. You probably won't need them for much else. .. note:: - * Notify handlers are always run in the order written. + * Notify handlers are always run in the same order they are parsed by Ansible, `not` in the order listed in the notify-statement. * Handler names live in a global namespace. * If two handler tasks have the same name, only one will run. `* <https://github.com/ansible/ansible/issues/4943>`_ From 85a28179b7b3884fb9e3314977b5712c2e95f949 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 3 Mar 2016 14:38:40 +0100 Subject: [PATCH 0811/1113] Improve the wording a bit According to what @amenonsen suggested in #10064. Although the additional statement is still needed to get rid of the ambiguity. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 8ae7e82b4a4..d14c8412e38 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -382,7 +382,7 @@ Handlers are best used to restart services and trigger reboots. You probably won't need them for much else. .. note:: - * Notify handlers are always run in the same order they are parsed by Ansible, `not` in the order listed in the notify-statement. + * Notify handlers are always run in the same order they are defined, `not` in the order listed in the notify-statement. * Handler names live in a global namespace. * If two handler tasks have the same name, only one will run. 
`* <https://github.com/ansible/ansible/issues/4943>`_ From 4657be4eabc4d3882f407624d54715d2af197142 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 3 Mar 2016 09:03:28 -0800 Subject: [PATCH 0812/1113] Transform pathnames to bytes before passing on to os.path functions --- lib/ansible/parsing/dataloader.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index f40d72450c2..cc88074f8e9 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -35,7 +35,7 @@ from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode from ansible.module_utils.basic import is_executable from ansible.utils.path import unfrackpath -from ansible.utils.unicode import to_unicode +from ansible.utils.unicode import to_unicode, to_bytes class DataLoader(): @@ -114,15 +114,15 @@ class DataLoader(): def path_exists(self, path): path = self.path_dwim(path) - return os.path.exists(path) + return os.path.exists(to_bytes(path)) def is_file(self, path): path = self.path_dwim(path) - return os.path.isfile(path) or path == os.devnull + return os.path.isfile(to_bytes(path, errors='strict')) or path == os.devnull def is_directory(self, path): path = self.path_dwim(path) - return os.path.isdir(path) + return os.path.isdir(to_bytes(path, errors='strict')) def list_directory(self, path): path = self.path_dwim(path) @@ -231,8 +231,8 @@ class DataLoader(): basedir = unfrackpath(path) # is it a role and if so make sure you get correct base path - if path.endswith('tasks') and os.path.exists(os.path.join(path,'main.yml')) \ - or os.path.exists(os.path.join(path,'tasks/main.yml')): + if path.endswith('tasks') and os.path.exists(to_bytes(os.path.join(path,'main.yml'), errors='strict')) \ + or os.path.exists(to_bytes(os.path.join(path,'tasks/main.yml'), errors='strict')): isrole = True if path.endswith('tasks'): basedir = unfrackpath(os.path.dirname(path)) @@ -255,7 +255,7 @@ class DataLoader(): search.append(self.path_dwim(source)) for candidate in search: - if os.path.exists(candidate): + if os.path.exists(to_bytes(candidate, errors='strict')): break return candidate @@ -266,8 +266,8 @@ class DataLoader(): retrieve password from STDOUT """ - this_path = os.path.realpath(os.path.expanduser(vault_password_file)) - if not os.path.exists(this_path): + this_path = os.path.realpath(to_bytes(os.path.expanduser(vault_password_file), errors='strict')) + if not os.path.exists(to_bytes(this_path, errors='strict')): raise AnsibleFileNotFound("The vault password file %s was not found" % this_path) if self.is_executable(this_path): From 7160b40ab16e8442c5fdcbb40fec3bbaaf6c8d5d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 3 Mar 2016 09:25:12 -0800 Subject: [PATCH 0813/1113] Fix the Makefile to run under dash (POSIX /bin/test syntax) --- test/integration/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index be1e153d178..7befd748bfb 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -132,12 +132,12 @@ blocks: rm -f block_test.out # run the test and check to make sure the right number of completions was logged ansible-playbook -vv test_blocks/main.yml | tee block_test.out - [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" == "$$(egrep '^[0-9]+ plays in' 
block_test.out | cut -f1 -d' ')" ] + [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" = "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] # cleanup the output log again, to make sure the test is clean rm -f block_test.out # run test with free strategy and again count the completions ansible-playbook -vv test_blocks/main.yml -e test_strategy=free | tee block_test.out - [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" == "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] + [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" = "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] cloud: amazon rackspace azure From ea5e08905676beeefa70bd1eba2865f6ac3553ff Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 13:25:47 -0500 Subject: [PATCH 0814/1113] restore initial json parsing attempt to loader fixes issues with extra vars json strings not being parsed correctly by the yaml parser --- lib/ansible/parsing/dataloader.py | 37 ++++++++++++++++++------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index cc88074f8e9..a508eacdda6 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -72,24 +72,29 @@ class DataLoader(): Creates a python datastructure from the given data, which can be either a JSON or YAML string. ''' - - # YAML parser will take JSON as it is a subset. - if isinstance(data, AnsibleUnicode): - # The PyYAML's libyaml bindings use PyUnicode_CheckExact so - # they are unable to cope with our subclass. - # Unwrap and re-wrap the unicode so we can keep track of line - # numbers - in_data = text_type(data) - else: - in_data = data + new_data = None try: - new_data = self._safe_load(in_data, file_name=file_name) - except YAMLError as yaml_exc: - self._handle_error(yaml_exc, file_name, show_content) + # we first try to load this data as JSON + new_data = json.loads(data) + except: + # must not be JSON, let the rest try + if isinstance(data, AnsibleUnicode): + # The PyYAML's libyaml bindings use PyUnicode_CheckExact so + # they are unable to cope with our subclass. + # Unwrap and re-wrap the unicode so we can keep track of line + # numbers + in_data = text_type(data) + else: + in_data = data + try: + new_data = self._safe_load(in_data, file_name=file_name) + except YAMLError as yaml_exc: + self._handle_error(yaml_exc, file_name, show_content) + + if isinstance(data, AnsibleUnicode): + new_data = AnsibleUnicode(new_data) + new_data.ansible_pos = data.ansible_pos - if isinstance(data, AnsibleUnicode): - new_data = AnsibleUnicode(new_data) - new_data.ansible_pos = data.ansible_pos return new_data def load_from_file(self, file_name): From 0031a72e0d7926bc87406e8d514d5ec9ec7dbbbf Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 13:51:44 -0500 Subject: [PATCH 0815/1113] tweak deprecation warnings display --- lib/ansible/utils/display.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 296a7b81806..3c0d3f7b5fc 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -188,12 +188,12 @@ class Display: if not removed: if version: - new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version) + new_msg = "[DEPRECATION WARNING]: %s.\nThis feature will be removed in version %s." 
% (msg, version) else: - new_msg = "[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg) + new_msg = "[DEPRECATION WARNING]: %s.\nThis feature will be removed in a future release." % (msg) new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n" else: - raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg) + raise AnsibleError("[DEPRECATED]: %s.\nPlease update your playbooks." % msg) wrapped = textwrap.wrap(new_msg, self.columns, replace_whitespace=False, drop_whitespace=False) new_msg = "\n".join(wrapped) + "\n" From 4cfd0428395f06013334e2888e6eba9fa7fc520e Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 13:52:09 -0500 Subject: [PATCH 0816/1113] more complete and informative errors mostly templating issues now all return data templated and actual error fixes #14781 --- lib/ansible/template/__init__.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 6a07566fa0e..806c964246c 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -40,7 +40,7 @@ from ansible.template.safe_eval import safe_eval from ansible.template.template import AnsibleJ2Template from ansible.template.vars import AnsibleJ2Vars from ansible.utils.debug import debug -from ansible.utils.unicode import to_unicode +from ansible.utils.unicode import to_unicode, to_str try: from hashlib import sha1 @@ -473,10 +473,10 @@ class Templar: try: t = myenv.from_string(data) except TemplateSyntaxError as e: - raise AnsibleError("template error while templating string: %s. String: %s" % (str(e), data)) + raise AnsibleError("template error while templating string: %s. String: %s" % (to_str(e), to_str(data))) except Exception as e: - if 'recursion' in str(e): - raise AnsibleError("recursive loop detected in template string: %s" % data) + if 'recursion' in to_str(e): + raise AnsibleError("recursive loop detected in template string: %s" % to_str(data)) else: return data @@ -492,13 +492,12 @@ class Templar: res = j2_concat(rf) except TypeError as te: if 'StrictUndefined' in str(te): - raise AnsibleUndefinedVariable( - "Unable to look up a name or access an attribute in template string. " + \ - "Make sure your variable name does not contain invalid characters like '-'." - ) + errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_str(data) + errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_str(te) + raise AnsibleUndefinedVariable(errmsg) else: - debug("failing because of a type error, template data is: %s" % data) - raise AnsibleError("an unexpected type error occurred. Error was %s" % te) + debug("failing because of a type error, template data is: %s" % to_str(data)) + raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_str(data),to_str(te))) if preserve_trailing_newlines: # The low level calls above do not preserve the newline From 951c8a5d27da586800f4b1eb9dc62cf553e17a23 Mon Sep 17 00:00:00 2001 From: Chris Lamb <chris@chris-lamb.co.uk> Date: Thu, 3 Mar 2016 19:16:58 +0000 Subject: [PATCH 0817/1113] Alias fileobj. 
--- lib/ansible/utils/display.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 296a7b81806..0031e2c3695 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -129,11 +129,12 @@ class Display: msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr)) if not stderr: - sys.stdout.write(msg2) - sys.stdout.flush() + fileobj = sys.stdout else: - sys.stderr.write(msg2) - sys.stderr.flush() + fileobj = sys.stderr + + fileobj.write(msg2) + fileobj.flush() if logger and not screen_only: msg2 = nocolor.lstrip(u'\n') From eb1141ee799fd8da80052c2e9d5722afab9f1682 Mon Sep 17 00:00:00 2001 From: Chris Lamb <chris@chris-lamb.co.uk> Date: Thu, 3 Mar 2016 19:21:06 +0000 Subject: [PATCH 0818/1113] Ignore EPIPE to avoid tracebacks when piping output to other commands For example: $ ansible web --list-hosts | head -n1 hosts (7): ERROR! Unexpected Exception: [Errno 32] Broken pipe Traceback (most recent call last): File "/home/lamby/git/private/lamby-ansible2/.venv/bin/ansible", line 114, in <module> display.display("to see the full traceback, use -vvv") File "/home/lamby/git/private/lamby-ansible2/.venv/local/lib/python2.7/site-packages/ansible/utils/display.py", line 133, in display sys.stdout.flush() IOError: [Errno 32] Broken pipe Such a pipe target will close up shop early when its seen enough input, causing ansible to print an ugly traceback. Signed-off-by: Chris Lamb <chris@chris-lamb.co.uk> --- lib/ansible/utils/display.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 0031e2c3695..5d056e5d229 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -28,6 +28,7 @@ import time import locale import logging import getpass +import errno from struct import unpack, pack from termios import TIOCGWINSZ from multiprocessing import Lock @@ -134,7 +135,14 @@ class Display: fileobj = sys.stderr fileobj.write(msg2) - fileobj.flush() + + try: + fileobj.flush() + except IOError as e: + # Ignore EPIPE in case fileobj has been prematurely closed, eg. 
+ # when piping to "head -n1" + if e.errno != errno.EPIPE: + raise if logger and not screen_only: msg2 = nocolor.lstrip(u'\n') From c022a43a47eee8debe3d026d571a419d9c8c2eda Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 14:40:22 -0500 Subject: [PATCH 0819/1113] fixed str to to_str --- lib/ansible/template/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 806c964246c..19925aa26b0 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -491,7 +491,7 @@ class Templar: try: res = j2_concat(rf) except TypeError as te: - if 'StrictUndefined' in str(te): + if 'StrictUndefined' in to_str(te): errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_str(data) errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_str(te) raise AnsibleUndefinedVariable(errmsg) From 80b10bd66962d84fccbeb0d08a083bcdd992feb7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 15:22:35 -0500 Subject: [PATCH 0820/1113] Moving creating of testing work dir out of dep role and into Makefile --- test/integration/Makefile | 99 ++++++++++--------- .../roles/prepare_tests/tasks/main.yml | 25 +++-- 2 files changed, 64 insertions(+), 60 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 7befd748bfb..7a496f65de2 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -1,3 +1,4 @@ +TEST_DIR ?= ~/ansible_testing INVENTORY ?= inventory VARS_FILE ?= integration_config.yml @@ -21,14 +22,18 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) -all: parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log +all: setup parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log + +setup: + rm -rf $(TEST_DIR) + mkdir -p $(TEST_DIR) parsing: - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 - ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 + ansible-playbook good_parsing.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -includes: - ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) +includes: setup + ansible-playbook test_includes.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) pull: pull_run pull_no_127 pull_limit_inventory @@ -53,28 +58,28 @@ pull_limit_inventory: exit $$RC -unicode: - ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) -e 'extra_var=café' +unicode: setup + ansible-playbook unicode.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) -v $(TEST_FLAGS) -e 'extra_var=café' # Test the start-at-task flag #9571 - ansible-playbook unicode.yml -i $(INVENTORY) -e 
@$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) + ansible-playbook unicode.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) -test_templating_settings: - ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_templating_settings: setup + ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -environment: - ansible-playbook test_environment.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) +environment: setup + ansible-playbook test_environment.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) -non_destructive: - ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +non_destructive: setup + ansible-playbook non_destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -destructive: - ansible-playbook destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +destructive: setup + ansible-playbook destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -check_mode: - ansible-playbook check_mode.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v --check $(TEST_FLAGS) +check_mode: setup + ansible-playbook check_mode.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v --check $(TEST_FLAGS) -test_group_by: - ansible-playbook test_group_by.yml -i inventory.group_by -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_group_by: setup + ansible-playbook test_group_by.yml -i inventory.group_by -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) test_handlers: ansible-playbook test_handlers.yml --tags scenario1 -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) @@ -98,14 +103,14 @@ test_hash: ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' -test_var_precedence: - ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) $(TEST_FLAGS) -v -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override' +test_var_precedence: setup + ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) $(TEST_FLAGS) -v -e outputdir=$(TEST_DIR) -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override' -test_vault: - ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-tasks - ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-hosts - ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --syntax-check - ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) +test_vault: setup + ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) 
--list-tasks -e outputdir=$(TEST_DIR) + ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --list-hosts -e outputdir=$(TEST_DIR) + ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) --syntax-check -e outputdir=$(TEST_DIR) + ansible-playbook test_vault.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) --vault-password-file $(VAULT_PASSWORD_FILE) -e outputdir=$(TEST_DIR) # test_delegate_to does not work unless we have permission to ssh to localhost. # Would take some more effort on our test systems to implement that -- probably @@ -113,30 +118,30 @@ test_vault: # root user on a node to ssh to itself. Until then, this is not in make all. # Have to run it manually. Ordinary users should be able to run this test as # long as they have permissions to login to their local machine via ssh. -test_delegate_to: - ansible-playbook test_delegate_to.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_delegate_to: setup + ansible-playbook test_delegate_to.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -test_winrm: - ansible-playbook test_winrm.yml -i inventory.winrm -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_winrm: setup + ansible-playbook test_winrm.yml -i inventory.winrm -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -test_tags: +test_tags: setup # Run everything by default - [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] + [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] # Run the exact tags, and always - [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ] + [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ] # Skip one tag - [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] + [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] -blocks: +blocks: setup # remove old output log rm -f block_test.out # run the test and check to make sure the right number of completions was logged - ansible-playbook -vv test_blocks/main.yml | tee block_test.out + ansible-playbook -vv -e outputdir=$(TEST_DIR) test_blocks/main.yml | tee block_test.out [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" = "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] # cleanup the output log again, 
to make sure the test is clean rm -f block_test.out # run test with free strategy and again count the completions - ansible-playbook -vv test_blocks/main.yml -e test_strategy=free | tee block_test.out + ansible-playbook -vv -e outputdir=$(TEST_DIR) test_blocks/main.yml -e test_strategy=free | tee block_test.out [ "$$(grep 'TEST COMPLETE' block_test.out | wc -l)" = "$$(egrep '^[0-9]+ plays in' block_test.out | cut -f1 -d' ')" ] cloud: amazon rackspace azure @@ -204,25 +209,25 @@ endif test_galaxy: test_galaxy_spec test_galaxy_yaml test_galaxy_git -test_galaxy_spec: +test_galaxy_spec: setup mytmpdir=$(MYTMPDIR) ; \ ansible-galaxy install -r galaxy_rolesfile -p $$mytmpdir/roles -vvvv ; \ cp galaxy_playbook.yml $$mytmpdir ; \ - ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -v $(TEST_FLAGS) ; \ + ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -e outputdir=$(TEST_DIR) -v $(TEST_FLAGS) ; \ RC=$$? ; \ rm -rf $$mytmpdir ; \ exit $$RC -test_galaxy_yaml: +test_galaxy_yaml: setup mytmpdir=$(MYTMPDIR) ; \ ansible-galaxy install -r galaxy_roles.yml -p $$mytmpdir/roles -vvvv; \ cp galaxy_playbook.yml $$mytmpdir ; \ - ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -v $(TEST_FLAGS) ; \ + ansible-playbook -i $(INVENTORY) $$mytmpdir/galaxy_playbook.yml -e outputdir=$(TEST_DIR) -v $(TEST_FLAGS) ; \ RC=$$? ; \ rm -rf $$mytmpdir ; \ exit $$RC -test_galaxy_git: +test_galaxy_git: setup mytmpdir=$(MYTMPDIR) ; \ ansible-galaxy install git+https://bitbucket.org/willthames/git-ansible-galaxy,v1.6 -p $$mytmpdir/roles -vvvv; \ cp galaxy_playbook_git.yml $$mytmpdir ; \ @@ -231,9 +236,9 @@ test_galaxy_git: rm -rf $$mytmpdir ; \ exit $$RC -test_lookup_paths: - ansible-playbook lookup_paths/play.yml -i $(INVENTORY) -v $(TEST_FLAGS) +test_lookup_paths: setup + ansible-playbook lookup_paths/play.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -v $(TEST_FLAGS) -no_log: +no_log: setup # This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug - [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] + [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] diff --git a/test/integration/roles/prepare_tests/tasks/main.yml b/test/integration/roles/prepare_tests/tasks/main.yml index 7983ea52361..a0af45cdf10 100644 --- a/test/integration/roles/prepare_tests/tasks/main.yml +++ b/test/integration/roles/prepare_tests/tasks/main.yml @@ -17,16 +17,15 @@ # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
-- name: clean out the test directory - file: name={{output_dir|mandatory}} state=absent - always_run: True - tags: - - prepare - when: clean_working_dir|default("yes")|bool - -- name: create the test directory - file: name={{output_dir}} state=directory - always_run: True - tags: - - prepare - +#- name: clean out the test directory +# file: name={{output_dir|mandatory}} state=absent +# always_run: True +# tags: +# - prepare +# when: clean_working_dir|default("yes")|bool +# +#- name: create the test directory +# file: name={{output_dir}} state=directory +# always_run: True +# tags: +# - prepare From 92dcad8e9985f7f44c04b28f4c0fa0c8ea603f0f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 15:23:44 -0500 Subject: [PATCH 0821/1113] Adding docker container capabilities to Travis --- .travis.yml | 28 +++++++-------- test/utils/docker/centos7/Dockerfile | 48 +++++++++++++++++++++++++ test/utils/docker/ubuntu1404/Dockerfile | 34 ++++++++++++++++++ test/utils/run_tests.sh | 15 ++++++++ 4 files changed, 109 insertions(+), 16 deletions(-) create mode 100644 test/utils/docker/centos7/Dockerfile create mode 100644 test/utils/docker/ubuntu1404/Dockerfile create mode 100755 test/utils/run_tests.sh diff --git a/.travis.yml b/.travis.yml index 603132f722c..9c9788db1a9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,20 @@ -sudo: false +sudo: required +services: + - docker language: python matrix: include: - - env: TOXENV=py24 INTEGRATION=no - - env: TOXENV=py26 INTEGRATION=yes + - env: TARGET=sanity TOXENV=py24 + - env: TARGET=sanity TOXENV=py26 python: 2.6 - - env: TOXENV=py27 INTEGRATION=yes + - env: TARGET=sanity TOXENV=py27 python: 2.7 - - env: TOXENV=py34 INTEGRATION=no + - env: TARGET=sanity TOXENV=py34 python: 3.4 - - env: TOXENV=py35 INTEGRATION=no + - env: TARGET=sanity TOXENV=py35 python: 3.5 + - env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" + - env: TARGET=ubuntu1404 addons: apt: sources: @@ -18,16 +22,8 @@ addons: packages: - python2.4 install: - - pip install tox PyYAML Jinja2 sphinx + - pip install tox script: -# urllib2's defaults are not secure enough for us -- ./test/code-smell/replace-urlopen.sh . 
-- ./test/code-smell/use-compat-six.sh lib -- ./test/code-smell/boilerplate.sh -- ./test/code-smell/required-and-default-attributes.sh -- if test x"$TOXENV" != x'py24' ; then tox ; fi -- if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi - #- make -C docsite all -- if test x"$INTEGRATION" = x'yes' ; then source ./hacking/env-setup && cd test/integration/ && make parsing && make test_var_precedence && make unicode ; fi + - ./test/utils/run_tests.sh after_success: - coveralls diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile new file mode 100644 index 00000000000..8994461171c --- /dev/null +++ b/test/utils/docker/centos7/Dockerfile @@ -0,0 +1,48 @@ +# Latest version of centos +FROM centos:centos7 +ENV LC_ALL en_US.UTF-8 + +#RUN yum -y swap fakesystemd systemd +RUN yum -y update; yum clean all; yum -y swap fakesystemd systemd + +RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ +rm -f /lib/systemd/system/multi-user.target.wants/*; \ +rm -f /etc/systemd/system/*.wants/*; \ +rm -f /lib/systemd/system/local-fs.target.wants/*; \ +rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ +rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ +rm -f /lib/systemd/system/basic.target.wants/*; \ +rm -f /lib/systemd/system/anaconda.target.wants/*; + +RUN yum -y install \ + dbus-python \ + epel-release \ + git \ + make \ + mercurial \ + rubygems \ + subversion \ + sudo \ + unzip \ + which + +RUN yum -y install \ + PyYAML \ + python-coverage \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + python-virtualenv + +RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts + +VOLUME /sys/fs/cgroup /run /tmp +ENV container=docker +CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile new file mode 100644 index 00000000000..d65d4a28a45 --- /dev/null +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -0,0 +1,34 @@ +FROM ubuntu:trusty +ENV LC_ALL en_US.UTF-8 + +RUN apt-get clean; apt-get update -y; + +RUN apt-get install -y \ + debianutils \ + git \ + make \ + mercurial \ + ruby \ + subversion \ + sudo \ + unzip + +RUN apt-get install -y \ + python-coverage \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + python-virtualenv \ + python-yaml + +RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts + +VOLUME /sys/fs/cgroup /run /tmp +ENV container=docker diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh new file mode 100755 index 00000000000..58ee87e7012 --- /dev/null +++ b/test/utils/run_tests.sh @@ -0,0 +1,15 @@ +#!/bin/sh -x + +if [ "${TARGET}" = "sanity" ]; then + ./test/code-smell/replace-urlopen.sh . 
+ ./test/code-smell/use-compat-six.sh lib + ./test/code-smell/boilerplate.sh + ./test/code-smell/required-and-default-attributes.sh + if test x"$TOXENV" != x'py24' ; then tox ; fi + if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi +else + docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} + docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} + docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; make tests && (cd test/integration; make)' + docker kill $(cat /tmp/cid_${TARGET}) +fi From 5feb20a361323d1be525207fc0e9f630cbad6217 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 15:53:06 -0500 Subject: [PATCH 0822/1113] Trying out the travis gce architecture --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9c9788db1a9..d5767384f57 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +dist: precise sudo: required services: - docker From 0776258e94e38939596b75ea77e450c5bd9853be Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 15:57:22 -0500 Subject: [PATCH 0823/1113] Actually trying out legacy group for travis --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index d5767384f57..d875722f978 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ dist: precise +group: legacy sudo: required services: - docker From 0739fd2030ed052a5c54fae21f5447b6bde2234d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 15:59:06 -0500 Subject: [PATCH 0824/1113] Now trying out trusty group on travis --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index d875722f978..8926a10afb4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,4 @@ -dist: precise -group: legacy +dist: trusty sudo: required services: - docker From d8876b5c033b1a4609946b7c70536386caf2838d Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 3 Mar 2016 16:45:47 -0600 Subject: [PATCH 0825/1113] Run /sbin/init in the ubuntu docker image, as we do with centos --- test/utils/docker/ubuntu1404/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index d65d4a28a45..55697d43a71 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -32,3 +32,4 @@ RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp ENV container=docker +CMD ["/sbin/init"] From b00a620fa5618d2f5b9c579211b0f0ea905f8dfd Mon Sep 17 00:00:00 2001 From: Evgeni Golov <evgeni@golov.de> Date: Thu, 3 Mar 2016 23:57:30 +0100 Subject: [PATCH 0826/1113] don't escape asterisks in parameter lists --- docsite/rst/intro_inventory.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 3f3f2e17d36..b52183b3845 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -253,11 +253,11 @@ Remote host environment parameters:: Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. ansible_python_interpreter The target host python path. 
This is useful for systems with more - than one Python or not located at "/usr/bin/python" such as \*BSD, or where /usr/bin/python + than one Python or not located at "/usr/bin/python" such as *BSD, or where /usr/bin/python is not a 2.X series Python. We do not use the "/usr/bin/env" mechanism as that requires the remote user's path to be set right and also assumes the "python" executable is named python, where the executable might be named something like "python26". - ansible\_\*\_interpreter + ansible_*_interpreter Works for anything such as ruby or perl and works just like ansible_python_interpreter. This replaces shebang of modules which will run on that host. From b2a6784a9fc1a8e1165d4f3b2b5d23b67d711c5f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 18:03:02 -0500 Subject: [PATCH 0827/1113] changed the output to keep backwards compat still shows path when verbose --- lib/ansible/plugins/callback/profile_tasks.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py index 644c5653a56..19bdd5bf4c2 100644 --- a/lib/ansible/plugins/callback/profile_tasks.py +++ b/lib/ansible/plugins/callback/profile_tasks.py @@ -99,7 +99,9 @@ class CallbackModule(CallbackBase): # Record the start time of the current task self.current = task._uuid - self.stats[self.current] = {'time': time.time(), 'name': task.get_name(), 'path': task.get_path()} + self.stats[self.current] = {'time': time.time(), 'name': task.get_name()} + if self._display.verbosity >= 2: + self.stats[self.current][ 'path'] = task.get_path() def v2_playbook_on_task_start(self, task, is_conditional): self._record_task(task) @@ -131,10 +133,8 @@ class CallbackModule(CallbackBase): # Print the timings for uuid, result in results: - self._display.display( - "{0:-<70}{1:-<70}{2:->9}".format( - '{0} '.format(result['path']), - '{0} '.format(result['name']), - ' {0:.02f}s'.format(result['time']), - ) - ) + msg = '' + msg="{0:-<70}{1:->9}".format('{0} '.format(result['name']),' {0:.02f}s'.format(result['time'])) + if 'path' in result: + msg += "\n{0:-<79}".format( '{0} '.format(result['path'])) + self._display.display(msg) From 6cad356b3bd548888c7f81f9240c46c12c2728ae Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 18:05:39 -0500 Subject: [PATCH 0828/1113] updated docs to show new output --- lib/ansible/plugins/callback/profile_tasks.rst | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.rst b/lib/ansible/plugins/callback/profile_tasks.rst index 97c0685d62a..aad84774ea0 100644 --- a/lib/ansible/plugins/callback/profile_tasks.rst +++ b/lib/ansible/plugins/callback/profile_tasks.rst @@ -55,14 +55,19 @@ No more wondering how old the results in a terminal window are. 
ansible <args here> <normal output here> - PLAY RECAP ******************************************************************** + PLAY RECAP ******************************************************************** Thursday 11 June 2016 22:51:00 +0100 (0:00:01.011) 0:00:43.247 ********* =============================================================================== - /home/bob/ansible/roles/old_and_slow/tasks/main.yml:4 ----------------old_and_slow : install tons of packages -------------------------------- 20.03s - /home/bob/ansible/roles/db/tasks/main.yml:4 --------------------------db : second task to run ------------------------------------------------- 2.03s - None -----------------------------------------------------------------setup ------------------------------------------------------------------- 0.42s - /home/bob/ansible/roles/www/tasks/main.yml:1 -------------------------www : first task to run ------------------------------------------------- 0.03s - /home/bob/ansible/roles/fast_task.yml:1 ------------------------------fast_task : first task to run ------------------------------------------- 0.01s + old_and_slow : install tons of packages -------------------------------- 20.03s + /home/bob/ansible/roles/old_and_slow/tasks/main.yml:4 ------------------------- + db : second task to run ------------------------------------------------- 2.03s + /home/bob/ansible/roles/db/tasks/main.yml:4 ----------------------------------- + setup ------------------------------------------------------------------- 0.42s + None -------------------------------------------------------------------------- + www : first task to run ------------------------------------------------- 0.03s + /home/bob/ansible/roles/www/tasks/main.yml:1 ---------------------------------- + fast_task : first task to run ------------------------------------------- 0.01s + /home/bob/ansible/roles/fast_task.yml:1 --------------------------------------- Compatibility ------------- From cc3cb0f65e4904775ca4e3eb277a606943e78887 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 18:13:36 -0500 Subject: [PATCH 0829/1113] fix issues with older yaml lib versions also added missing json import and removed unused ones --- lib/ansible/parsing/dataloader.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index a508eacdda6..8b37df5af89 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -21,10 +21,9 @@ __metaclass__ = type import copy import os -import stat +import json import subprocess - -from yaml import load, YAMLError +from yaml import YAMLError from ansible.compat.six import text_type, string_types from ansible.errors import AnsibleFileNotFound, AnsibleParserError, AnsibleError @@ -145,7 +144,10 @@ class DataLoader(): try: return loader.get_single_data() finally: - loader.dispose() + try: + loader.dispose() + except AttributeError: + pass # older versions of yaml don't have dispose function, ignore def _get_file_contents(self, file_name): ''' From 0647fadf0c41fd551be7a2f9bb01fe7ee78fcdd6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 18:17:58 -0500 Subject: [PATCH 0830/1113] added rackhd inv script to chnlog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b2b213e6e3..216cd72d46e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,9 @@ Ansible Changes By Release * 
slack * json +####New Inventory scripts: +* rackhd + ###Minor Changes: * callbacks now have access to the options with which the CLI was called From e283cba628f6c0a4aa6f53162df42f886c2151d5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 19:31:48 -0500 Subject: [PATCH 0831/1113] added new modules and feature data --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 216cd72d46e..f3d871bb9ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,15 @@ Ansible Changes By Release ####New Modules: * aws: ec2_vol_facts -* aws: ec2_vpc_dhcp_options.py +* aws: ec2_vpc_dhcp_options * aws: ec2_vpc_net_facts * cloudstack: cs_volume +* cloudstack: cs_configuration +* cloudstack: cs_resourcelimit +* cloudstack: cs_instance_facts +* cloudstack: cs_pod +* cloudstack: cs_cluster +* cloudstack: cs_zone * win_regmerge * win_timezone * yum_repository @@ -32,6 +38,7 @@ Ansible Changes By Release * callbacks now have access to the options with which the CLI was called * debug is now controlable with verbosity +* modules now get verbosity, diff and other flags as passed to ansible ## 2.0.1 "Over the Hills and Far Away" From e76209549771d15f9a12fe12aa696df8c88c468a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 3 Mar 2016 19:50:52 -0500 Subject: [PATCH 0832/1113] better task parsing errors fixes #14790 --- lib/ansible/parsing/splitter.py | 6 +++--- lib/ansible/playbook/task.py | 10 +++++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index fa26242fcfc..8f3c5cc0888 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -22,7 +22,7 @@ __metaclass__ = type import re import codecs -from ansible.errors import AnsibleError +from ansible.errors import AnsibleParserError from ansible.parsing.quoting import unquote # Decode escapes adapted from rspeer's answer here: @@ -60,7 +60,7 @@ def parse_kv(args, check_raw=False): vargs = split_args(args) except ValueError as ve: if 'no closing quotation' in str(ve).lower(): - raise AnsibleError("error parsing argument string, try quoting the entire line.") + raise AnsibleParsingError("error parsing argument string, try quoting the entire line.") else: raise @@ -256,6 +256,6 @@ def split_args(args): # If we're done and things are not at zero depth or we're still inside quotes, # raise an error to indicate that the args were unbalanced if print_depth or block_depth or comment_depth or inside_quotes: - raise AnsibleError("error while splitting arguments, either an unbalanced jinja2 block or quotes") + raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes") return params diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 556aea5043a..c97e81e9648 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -21,13 +21,12 @@ __metaclass__ = type from ansible.compat.six import iteritems, string_types -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleParserError from ansible.parsing.mod_args import ModuleArgsParser from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode from ansible.plugins import lookup_loader - from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become @@ -36,6 
+35,8 @@ from ansible.playbook.conditional import Conditional from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable +from ansible.utils.unicode import to_str + try: from __main__ import display except ImportError: @@ -168,7 +169,10 @@ class Task(Base, Conditional, Taggable, Become): # and the delegate_to value from the various possible forms # supported as legacy args_parser = ModuleArgsParser(task_ds=ds) - (action, args, delegate_to) = args_parser.parse() + try: + (action, args, delegate_to) = args_parser.parse() + except AnsibleParserError as e: + raise AnsibleParserError(to_str(e), obj=ds) # the command/shell/script modules used to support the `cmd` arg, # which corresponds to what we now call _raw_params, so move that From e4acd44e2393bc59ae33127111cf55fe0a15eb38 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 20:43:59 -0500 Subject: [PATCH 0833/1113] Make sure test script fails on any error --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 58ee87e7012..b725110e16e 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -1,4 +1,4 @@ -#!/bin/sh -x +#!/bin/sh -xe if [ "${TARGET}" = "sanity" ]; then ./test/code-smell/replace-urlopen.sh . From f56af2ec3f667e70713c34e20c32c85e55af9f36 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 21:03:28 -0500 Subject: [PATCH 0834/1113] Updating docker files for tests --- test/utils/docker/centos7/Dockerfile | 8 -------- test/utils/docker/ubuntu1404/Dockerfile | 7 ------- 2 files changed, 15 deletions(-) diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 8994461171c..9cbbe4583dc 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -1,10 +1,6 @@ # Latest version of centos FROM centos:centos7 -ENV LC_ALL en_US.UTF-8 - -#RUN yum -y swap fakesystemd systemd RUN yum -y update; yum clean all; yum -y swap fakesystemd systemd - RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ rm -f /lib/systemd/system/multi-user.target.wants/*; \ rm -f /etc/systemd/system/*.wants/*; \ @@ -13,7 +9,6 @@ rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ rm -f /lib/systemd/system/basic.target.wants/*; \ rm -f /lib/systemd/system/anaconda.target.wants/*; - RUN yum -y install \ dbus-python \ epel-release \ @@ -25,7 +20,6 @@ RUN yum -y install \ sudo \ unzip \ which - RUN yum -y install \ PyYAML \ python-coverage \ @@ -38,11 +32,9 @@ RUN yum -y install \ python-pip \ python-setuptools \ python-virtualenv - RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts - VOLUME /sys/fs/cgroup /run /tmp ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index 55697d43a71..b013f6b42aa 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -1,8 +1,5 @@ FROM ubuntu:trusty -ENV LC_ALL en_US.UTF-8 - RUN apt-get clean; apt-get update -y; - RUN apt-get install -y \ debianutils \ git \ @@ -12,7 +9,6 @@ RUN apt-get install -y \ subversion \ sudo \ unzip - RUN apt-get install -y \ python-coverage \ python-httplib2 \ @@ -25,11 +21,8 @@ RUN 
apt-get install -y \ python-setuptools \ python-virtualenv \ python-yaml - RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts - -VOLUME /sys/fs/cgroup /run /tmp ENV container=docker CMD ["/sbin/init"] From 069e597aaa99a203f9fb9c1b131e3c6c60194f70 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 3 Mar 2016 22:29:21 -0500 Subject: [PATCH 0835/1113] Fixing locale stuff in ubuntu 14.04 Dockerfile --- test/utils/docker/ubuntu1404/Dockerfile | 2 ++ test/utils/run_tests.sh | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index b013f6b42aa..98efd3f093f 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -3,6 +3,7 @@ RUN apt-get clean; apt-get update -y; RUN apt-get install -y \ debianutils \ git \ + locales \ make \ mercurial \ ruby \ @@ -24,5 +25,6 @@ RUN apt-get install -y \ RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts +RUN locale-gen en_US.UTF-8 ENV container=docker CMD ["/sbin/init"] diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index b725110e16e..33ac7338d81 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -10,6 +10,6 @@ if [ "${TARGET}" = "sanity" ]; then else docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} - docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; make tests && (cd test/integration; make)' + docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)' docker kill $(cat /tmp/cid_${TARGET}) fi From 253376b3b7dba9a937f4c1a9bc315a9326108950 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 3 Mar 2016 20:43:12 -0800 Subject: [PATCH 0836/1113] Make the source path into a byte string before passing to os.path --- lib/ansible/plugins/action/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index c50076c5430..8941db434b0 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -107,7 +107,7 @@ class ActionModule(ActionBase): source_files = [] # If source is a directory populate our list else source is a file and translate it to a tuple. - if os.path.isdir(source): + if os.path.isdir(to_bytes(source)): # Get the amount of spaces to remove to get the relative path. 
if source_trailing_slash: sz = len(source) From 8f1303c81a40a249571970a1ecf727f0401c8bf5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 3 Mar 2016 20:56:50 -0800 Subject: [PATCH 0837/1113] Remember to use errors=strict since this is looking up a filename on the filesystem (ie it has to match exactly) --- lib/ansible/plugins/action/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 8941db434b0..a833b28b160 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -107,7 +107,7 @@ class ActionModule(ActionBase): source_files = [] # If source is a directory populate our list else source is a file and translate it to a tuple. - if os.path.isdir(to_bytes(source)): + if os.path.isdir(to_bytes(source, errors='strict')): # Get the amount of spaces to remove to get the relative path. if source_trailing_slash: sz = len(source) From fa630872d9f511b5ea2b53f71468b7963bbf586a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 4 Mar 2016 02:48:32 -0500 Subject: [PATCH 0838/1113] Minor update to the Dockerfiles --- test/utils/docker/centos7/Dockerfile | 3 ++- test/utils/docker/ubuntu1404/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 9cbbe4583dc..6abac609222 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -12,6 +12,7 @@ rm -f /lib/systemd/system/anaconda.target.wants/*; RUN yum -y install \ dbus-python \ epel-release \ + file \ git \ make \ mercurial \ @@ -34,7 +35,7 @@ RUN yum -y install \ python-virtualenv RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ -RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts +RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index 98efd3f093f..a0144e56d5a 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -24,7 +24,7 @@ RUN apt-get install -y \ python-yaml RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ -RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts +RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 ENV container=docker CMD ["/sbin/init"] From b33074b70329a080771ee425f32c8a65bd7c094b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=8F=E6=81=BA=28Xia=20Kai=29?= <xiaket@gmail.com> Date: Fri, 4 Mar 2016 07:58:18 +0000 Subject: [PATCH 0839/1113] remove main_q for simplicity. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit main_q is not used anywhere in the codebase. It is created in TaskQueueManager._initialize_processes, bundled with rslt_q into TaskQueueManger._workers, later unwrapped in StrategyBase but not used. This queue is closed in TaskQueueManger._cleanup_processes. Historically, it is passed as a init parameter into WorkerProcess, introduced in 62d7956, but this behavior is changed in 120b9a7. 
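A rough sketch of the resulting bookkeeping (illustrative only; the real change is in the diff below) -- each worker slot keeps just the result queue once the unused main_q is gone:

```python
# Illustrative sketch, not the actual diff: worker slots in
# TaskQueueManager._workers keep only the result queue.
import multiprocessing

num_workers = 4
workers = []
for _ in range(num_workers):
    # before: workers.append([None, main_q, rslt_q])
    rslt_q = multiprocessing.Queue()
    workers.append([None, rslt_q])
```
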
Signed-off-by: 夏恺(Xia Kai) <xiaket@gmail.com> --- lib/ansible/executor/process/result.py | 2 +- lib/ansible/executor/task_queue_manager.py | 6 ++---- lib/ansible/plugins/strategy/__init__.py | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 7c75bbdfc21..4c4ace05d19 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -65,7 +65,7 @@ class ResultProcess(multiprocessing.Process): result = None starting_point = self._cur_worker while True: - (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + (worker_prc, rslt_q) = self._workers[self._cur_worker] self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index bed9879c421..601d27c58b5 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -99,9 +99,8 @@ class TaskQueueManager: self._workers = [] for i in range(num): - main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() - self._workers.append([None, main_q, rslt_q]) + self._workers.append([None, rslt_q]) self._result_prc = ResultProcess(self._final_q, self._workers) self._result_prc.start() @@ -249,9 +248,8 @@ class TaskQueueManager: if self._result_prc: self._result_prc.terminate() - for (worker_prc, main_q, rslt_q) in self._workers: + for (worker_prc, rslt_q) in self._workers: rslt_q.close() - main_q.close() if worker_prc and worker_prc.is_alive(): try: worker_prc.terminate() diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 8d40aaaefeb..790f988b5a9 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -153,7 +153,7 @@ class StrategyBase: queued = False while True: - (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + (worker_prc, rslt_q) = self._workers[self._cur_worker] if worker_prc is None or not worker_prc.is_alive(): worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) self._workers[self._cur_worker][0] = worker_prc From 11c18141461d21a99815fb69eef61ca526c6d140 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Fri, 4 Mar 2016 08:08:04 -0500 Subject: [PATCH 0840/1113] 14604 - Adding suggested comments related to securing Galaxy role versions --- docs/proposals/auto-install-roles.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/proposals/auto-install-roles.md b/docs/proposals/auto-install-roles.md index b63349bb302..9fb17fc2b1e 100644 --- a/docs/proposals/auto-install-roles.md +++ b/docs/proposals/auto-install-roles.md @@ -125,6 +125,7 @@ Here's the approach: 'name': 'repo’ } ``` +- For roles installed from Galaxy, Galaxy should provide some measure of security against version change. Galaxy should track the commit related to a version. If the role owner changes historical versions (today tags) and thus changes the commit hash, the affected version would become un-installable. - Refactor the install process to encompass the following : From 2da2dc621b37722c87ace991af5712653e7e0e81 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Fri, 4 Mar 2016 07:18:05 -0600 Subject: [PATCH 0841/1113] Fix callback example in porting guide. 
See #14772 --- docsite/rst/porting_guide_2.0.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index ee6e65c150f..44a920f984c 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -229,16 +229,19 @@ for most callback plugins. However, if your callback plugin makes use of have to store the values for these yourself as ansible no longer automatically populates the callback with them. Here's a short snippet that shows you how:: + import os from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): def __init__(self): self.playbook = None + self.playbook_name = None self.play = None self.task = None def v2_playbook_on_start(self, playbook): self.playbook = playbook + self.playbook_name = os.path.basename(self.playbook._filename) def v2_playbook_on_play_start(self, play): self.play = play @@ -247,7 +250,7 @@ populates the callback with them. Here's a short snippet that shows you how:: self.task = task def v2_on_any(self, *args, **kwargs): - self._display.display('%s: %s: %s' % (self.playbook.name, + self._display.display('%s: %s: %s' % (self.playbook_name, self.play.name, self.task)) From 08777fdecd7dd1470c25fc24055942cbdc250247 Mon Sep 17 00:00:00 2001 From: Robyn Bergeron <robyn.bergeron@gmail.com> Date: Fri, 4 Mar 2016 07:12:13 -0700 Subject: [PATCH 0842/1113] Create proposals_process_proposal.MD Created proposals process proposal. Inception! --- docs/proposals/proposals_process_proposal.MD | 107 +++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 docs/proposals/proposals_process_proposal.MD diff --git a/docs/proposals/proposals_process_proposal.MD b/docs/proposals/proposals_process_proposal.MD new file mode 100644 index 00000000000..28a2b208e20 --- /dev/null +++ b/docs/proposals/proposals_process_proposal.MD @@ -0,0 +1,107 @@ +# Proposal: Proposals - have a process and documentation + +*Author*: Robyn Bergeron <@robynbergeron> + +*Date*: 04/03/2016 + +- Status: New +- Proposal type: community development process +- Targeted Release: Forever, until we improve it more at a later date. +- PR for Comments: (will submit update after submitted) +- Estimated time to implement: 2 weeks at most + +Comments on this proposal prior to acceptance are accepted in the comments section of the pull request linked above. + +## Motivation +Define light process for how proposals are created and accepted, and document the process permanently in community.html somewhere. + +The following suggested process was created with the following ideas in mind: +- Transparency: notifications, decisions made in public meetings, etc. helps people to know what is going on. +- Avoid proliferation of multiple comments in multiple places; keep everything in the PR. +- Action is being taken: Knowing when and where decisions are made, and knowing who is the final authority, gives people the sense that things are moving. +- Ensure that new features or enhancements are added to the roadmap and release notes. + +### Problems +Proposals are confusing. Should I write one? Where do I put it? Why can’t I find any documentation about this? Who approves things? This is why we should have a light and unbureaucratic process. 
+ +## Solution proposal +This proposal has multiple parts: +- Proposed process for submitting / accepting proposals +- Suggested proposal template + +Once the process and template are approved, a PR will be submitted for documenting the process permanently in documentation, as well as a PR to ansible/docs/proposals for the proposal template. + +### Proposed Process +1: PROPOSAL CREATION +- Person making the proposal creates the proposal document in ansible/docs/proposals via PR, following the proposal template. +- Author of proposal PR updates the proposal with the PR # / link. +- Notify the community that this proposal exists. +- Author notifies ansible-devel mailing list for transparency, providing link to PR. +- Author includes commentary indicating that comments should *not* be in response to this email, but rather, community members should add comments or feedback to the pull request. + +2: KEEP THE PROPOSAL MOVING TOWARDS A DECISION. +- Proposals use public meetings as a mechanism to keep them moving. +- All proposals are decided on in a public meeting by a combination of folks with commit access to Ansible and any interested parties / users, as well as the author of the proposal. Time for approvals will be a portion of the overall schedule; proposals will be reviewed in the order received and may occasionally be deferred to the next meeting. If we are overwhelmed, a separate meeting may be scheduled. + +(Note: ample feedback in the comments of the proposal PR should allow for folks to come to broad consensus in one way or another in the meeting rather rapidly, generally without an actual counted vote. However, the decision should be made *in the meeting*, so as to avoid any questions around whether or not the approval of one Ansible maintain / committer reflects the opinions or decision of everyone.) + +- *New* proposals are explicitly added to the public IRC meeting agenda for each week by the meeting organizer for for acknowledgement of ongoing discussion and existence, and/or easy approval/rejection. (Either via a separate issue somewhere tracking any meeting items, or by adding a “meeting” label to the PR.) +- Existing new, not-yet-approved proposals are reviewed weekly by meeting organizer to check for slow-moving/stalled proposals, or for flags from the proposal owner indicating that they'd like to have it addressed in the weeks meeting + +3: PROPOSAL APPROVED +- Amendments needed to the proposal after IRC discussion should be made immediately. +- The proposal status should be changed to Approved / In Progress in the document. +- The proposal should be moved from /docs/proposals to a docs/roadmap folder (or similar). +- The proposal PR comments should be updated with a note by the meeting organizer that the proposal has been accepted, and further commentary should be in the PRs implementing the code itself. +- Proposals can also be PENDING (waiting on something), or DECLINED. + +4: CODE IN PROGRESS +- Approved proposals should be periodically checked for progress, especially if tied to a release and/or is noted as release blocking. +- PRs implementing the proposal are recommended to link to the original proposal PR or document for context. +5: CODE COMPLETE +- Proposal document, which should be in docs/roadmap, should have their status updated to COMPLETE. 
+- The release notes file for the targeted release should be updated with a small note regarding the feature or enhancement; completed proposals for community processes should have a follow-up mail sent to the mailing list providing information and links to the new process. +- Hooray! Buy your friend a tasty beverage of their choosing. + +### Suggested Proposal Template Outline +Following the .md convention, a proposal template should go in the docs/proposals repository. This is a suggested outline; the template will provide more guidance / context and will be submitted as a PR upon approval of this proposal. + +Please note that, in line with the above guidance that some processes will require fine-tuning over time, that the suggested template outline below, as well as the final submitted template to the docs/proposals repo has wiggle room in terms of description, and that what makes sense may vary from one proposal to another. The expectation is that people will simply do what seems right, and over time we’ll figure out what works best — but in the meantime, guidance is nice. + +#### TEMPLATE OUTLINE +- Proposal Title +- Author (w/github ID linked) +- Date: + +- Status: New, Approved, Pending, Complete +- Proposal type: Feature / enhancement / community development process +- Targeted Release: +- PR for comments: +- Estimated time to implement: + +Comments on this proposal prior to acceptance are accepted in the comments of the PR linked above. + +- Motivation / Problems solved: +- Proposed Solution: (what you’re doing, and why; keeping this loose for now.) + +Other Suggested things to include: +- Dependencies / requirements: +- Testing: +- Documentation: + +## Dependencies / requirements + +- Approval of this proposed process is needed to create the actual documentation of the process. +- Weekly, public IRC meetings (which should probably be documented Wrt time / day of week / etc. in the contributor documentation) of the Ansible development community. +- Creation of a “meeting” label in GitHub (or defining some other mechanism to gather items for a weekly meeting agenda, such as a separate issue in GitHub that links to the PRs.) +- Coming to an agreement regarding “what qualifies as a feature or enhancement that requires a proposal, vs. just submitting a PR with code.” It could simply be that if the change is large or very complicated, our recommendation is always to file a proposal to ensure (a) transparency (b) that a contributor doesn’t waste their time on something that ultimately can’t be merged at this time. +- Nice to have: Any new proposal PR landing in docs/proposals is automatically merged and an email automatically notifies the mailing list of the existence and location of the PR for comments. + +## Testing + +Testing of this proposal will literally be via submitting this proposal through the proposed proposal process. If it fails miserably, we’ll know it needs fine-tuning or needs to go in the garbage can. + +## Documentation: + +- Documentation of the process, including “what is a feature or enhancement vs. just a regular PR,” along with the steps shown above, will be added to the Ansible documentation in .rst format via PR. The documentation should also provide guidance on the standard wording of the email notifying ansible-devel list that the proposal exists and is ready for review in the PR comments. +- A proposal template should also be created in the docs/proposals repo directory. 
From 7f34705b0c0cd39daaa46e53bf64401e9590d8b3 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 4 Mar 2016 09:16:10 -0500 Subject: [PATCH 0843/1113] Fixing up Dockerfiles some more to get tests passing fully --- test/utils/docker/centos7/Dockerfile | 4 +-- test/utils/docker/ubuntu1404/Dockerfile | 37 +++++++++++++++++++-- test/utils/docker/ubuntu1404/init-fake.conf | 13 ++++++++ 3 files changed, 49 insertions(+), 5 deletions(-) create mode 100644 test/utils/docker/ubuntu1404/init-fake.conf diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 6abac609222..69450e0af19 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -33,9 +33,9 @@ RUN yum -y install \ python-pip \ python-setuptools \ python-virtualenv -RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ -RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts +RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index a0144e56d5a..25493c1f86e 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -2,6 +2,7 @@ FROM ubuntu:trusty RUN apt-get clean; apt-get update -y; RUN apt-get install -y \ debianutils \ + gawk \ git \ locales \ make \ @@ -10,6 +11,35 @@ RUN apt-get install -y \ subversion \ sudo \ unzip + +# helpful things taken from the ubuntu-upstart Dockerfile: +# https://github.com/tianon/dockerfiles/blob/4d24a12b54b75b3e0904d8a285900d88d3326361/sbin-init/ubuntu/upstart/14.04/Dockerfile +ADD init-fake.conf /etc/init/fake-container-events.conf + +# undo some leet hax of the base image +RUN rm /usr/sbin/policy-rc.d; \ + rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl +# remove some pointless services +RUN /usr/sbin/update-rc.d -f ondemand remove; \ + for f in \ + /etc/init/u*.conf \ + /etc/init/mounted-dev.conf \ + /etc/init/mounted-proc.conf \ + /etc/init/mounted-run.conf \ + /etc/init/mounted-tmp.conf \ + /etc/init/mounted-var.conf \ + /etc/init/hostname.conf \ + /etc/init/networking.conf \ + /etc/init/tty*.conf \ + /etc/init/plymouth*.conf \ + /etc/init/hwclock*.conf \ + /etc/init/module*.conf\ + ; do \ + dpkg-divert --local --rename --add "$f"; \ + done; \ + echo '# /lib/init/fstab: cleared out for bare-bones Docker' > /lib/init/fstab +# end things from ubuntu-upstart Dockerfile + RUN apt-get install -y \ python-coverage \ python-httplib2 \ @@ -22,9 +52,10 @@ RUN apt-get install -y \ python-setuptools \ python-virtualenv \ python-yaml -RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN rm /etc/apt/apt.conf.d/docker-clean +RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ -RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts +RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 -ENV container=docker +ENV container docker CMD ["/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/init-fake.conf b/test/utils/docker/ubuntu1404/init-fake.conf new file mode 100644 index 00000000000..f5db965051e --- /dev/null +++ b/test/utils/docker/ubuntu1404/init-fake.conf @@ -0,0 +1,13 @@ +# fake 
some events needed for correct startup other services + +description "In-Container Upstart Fake Events" + +start on startup + +script + rm -rf /var/run/*.pid + rm -rf /var/run/network/* + /sbin/initctl emit stopped JOB=udevtrigger --no-wait + /sbin/initctl emit started JOB=udev --no-wait + /sbin/initctl emit runlevel RUNLEVEL=3 --no-wait +end script From 69a144bfbe15bcbec21561bf22293a603102158b Mon Sep 17 00:00:00 2001 From: Robyn Bergeron <robyn.bergeron@gmail.com> Date: Fri, 4 Mar 2016 07:27:28 -0700 Subject: [PATCH 0844/1113] Update proposals proposal with PR link --- docs/proposals/proposals_process_proposal.MD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/proposals/proposals_process_proposal.MD b/docs/proposals/proposals_process_proposal.MD index 28a2b208e20..f8ccc9ad79d 100644 --- a/docs/proposals/proposals_process_proposal.MD +++ b/docs/proposals/proposals_process_proposal.MD @@ -7,7 +7,7 @@ - Status: New - Proposal type: community development process - Targeted Release: Forever, until we improve it more at a later date. -- PR for Comments: (will submit update after submitted) +- PR for Comments: https://github.com/ansible/ansible/pull/14802# - Estimated time to implement: 2 weeks at most Comments on this proposal prior to acceptance are accepted in the comments section of the pull request linked above. From 27a33a6f1863a29615d1a25877b4c5d052d6c9be Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 4 Mar 2016 11:34:01 -0500 Subject: [PATCH 0845/1113] dont erase previous nameservers in fact gathering fixes #14806 --- lib/ansible/module_utils/facts.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index f5558e86505..2d3378cb299 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -764,7 +764,8 @@ class Facts(object): if len(tokens) == 0: continue if tokens[0] == 'nameserver': - self.facts['dns']['nameservers'] = [] + if not 'nameservers' in self.facts['dns']: + self.facts['dns']['nameservers'] = [] for nameserver in tokens[1:]: self.facts['dns']['nameservers'].append(nameserver) elif tokens[0] == 'domain': From 5b79ed77e7f540f37a2383d0aa9c61ebe59e65fb Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 4 Mar 2016 02:11:35 -0800 Subject: [PATCH 0846/1113] Use to_bytes on filenames in filesystem calls. 
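The pattern being applied is, roughly, to encode text paths to bytes before handing them to filesystem calls, so non-ASCII filenames do not trip implicit conversions under Python 2. A minimal sketch of the idea, using the `to_bytes` helper that the diff below imports from `ansible.utils.unicode` (illustrative only, not part of the change itself):

```python
# Minimal illustrative sketch of the pattern used in this patch: convert text
# paths to bytes before passing them to os/filesystem APIs.
import os

from ansible.utils.unicode import to_bytes

path = u'/tmp/ansible-local-汉语'
# errors='strict' because the name must match an on-disk file exactly;
# fail loudly if it cannot be encoded.
b_path = to_bytes(path, errors='strict')

if os.path.exists(b_path) and not os.path.isdir(b_path):
    with open(b_path, 'rb') as in_file:
        data = in_file.read()
```
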
--- lib/ansible/parsing/dataloader.py | 2 +- lib/ansible/plugins/connection/chroot.py | 4 ++-- lib/ansible/plugins/connection/local.py | 4 ++-- lib/ansible/plugins/connection/paramiko_ssh.py | 7 ++++--- lib/ansible/plugins/connection/ssh.py | 2 +- lib/ansible/utils/hashing.py | 5 +++-- 6 files changed, 13 insertions(+), 11 deletions(-) diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py index 8b37df5af89..c0203d3cc37 100644 --- a/lib/ansible/parsing/dataloader.py +++ b/lib/ansible/parsing/dataloader.py @@ -118,7 +118,7 @@ class DataLoader(): def path_exists(self, path): path = self.path_dwim(path) - return os.path.exists(to_bytes(path)) + return os.path.exists(to_bytes(path, errors='strict')) def is_file(self, path): path = self.path_dwim(path) diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index ba41ffb5d88..0778a5e22ca 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -127,7 +127,7 @@ class Connection(ConnectionBase): out_path = pipes.quote(self._prefix_login_path(out_path)) try: - with open(in_path, 'rb') as in_file: + with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: try: p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file) except OSError: @@ -153,7 +153,7 @@ class Connection(ConnectionBase): except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") - with open(out_path, 'wb+') as out_file: + with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index 5004c3698db..79ba8ab8bc1 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -127,10 +127,10 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) display.vvv(u"{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path)) - if not os.path.exists(in_path): + if not os.path.exists(to_bytes(in_path, errors='strict')): raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path))) try: - shutil.copyfile(in_path, out_path) + shutil.copyfile(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict')) except shutil.Error: raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_str(in_path), to_str(out_path))) except IOError as e: diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 5f311756e83..557acc96900 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -43,6 +43,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase from ansible.utils.path import makedirs_safe +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -322,7 +323,7 @@ class Connection(ConnectionBase): display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) - if not os.path.exists(in_path): + if not os.path.exists(to_bytes(in_path, errors='strict')): raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) try: @@ -331,7 +332,7 @@ class Connection(ConnectionBase): raise AnsibleError("failed to open a SFTP connection (%s)" % e) try: - 
self.sftp.put(in_path, out_path) + self.sftp.put(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict')) except IOError: raise AnsibleError("failed to transfer file to %s" % out_path) @@ -357,7 +358,7 @@ class Connection(ConnectionBase): raise AnsibleError("failed to open a SFTP connection (%s)", e) try: - self.sftp.get(in_path, out_path) + self.sftp.get(to_bytes(in_path, errors='strict'), to_bytes(out_path, errors='strict')) except IOError: raise AnsibleError("failed to transfer file from %s" % in_path) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 482369fe660..328ace55374 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -621,7 +621,7 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host) - if not os.path.exists(in_path): + if not os.path.exists(to_bytes(in_path, errors='strict')): raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path))) # scp and sftp require square brackets for IPv6 addresses, but diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py index 5e378db79f4..76c34eefbf4 100644 --- a/lib/ansible/utils/hashing.py +++ b/lib/ansible/utils/hashing.py @@ -21,6 +21,7 @@ __metaclass__ = type import os from ansible.errors import AnsibleError +from ansible.utils.unicode import to_bytes # Note, sha1 is the only hash algorithm compatible with python2.4 and with # FIPS-140 mode (as of 11-2014) @@ -54,12 +55,12 @@ def secure_hash_s(data, hash_func=sha1): def secure_hash(filename, hash_func=sha1): ''' Return a secure hash hex digest of local file, None if file is not present or a directory. 
''' - if not os.path.exists(filename) or os.path.isdir(filename): + if not os.path.exists(to_bytes(filename, errors='strict')) or os.path.isdir(to_bytes(filename, errors='strict')): return None digest = hash_func() blocksize = 64 * 1024 try: - infile = open(filename, 'rb') + infile = open(to_bytes(filename, errors='strict'), 'rb') block = infile.read(blocksize) while block: digest.update(block) From d6546a75134c0498e6c46372d20a297f64356a20 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 4 Mar 2016 13:12:35 -0500 Subject: [PATCH 0847/1113] make sure group_names is always sorted this makes it consistent with previous ansilbe versions and other paths that create the group_names variable --- lib/ansible/vars/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 9cb0108ed64..1405d8736dd 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -362,7 +362,7 @@ class VariableManager: variables['playbook_dir'] = loader.get_basedir() if host: - variables['group_names'] = [group.name for group in host.get_groups() if group.name != 'all'] + variables['group_names'] = sorted([group.name for group in host.get_groups() if group.name != 'all']) if self._inventory is not None: variables['groups'] = dict() From 3d0637292cd6f476f4b3655fb93add7ee5394972 Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Fri, 4 Mar 2016 07:03:08 -0500 Subject: [PATCH 0848/1113] Update to docker_container module proposal --- .../docker/docker_container_moduler.md | 83 +++++++------------ 1 file changed, 29 insertions(+), 54 deletions(-) diff --git a/docs/proposals/docker/docker_container_moduler.md b/docs/proposals/docker/docker_container_moduler.md index e4a2b80590a..4b826ad5d13 100644 --- a/docs/proposals/docker/docker_container_moduler.md +++ b/docs/proposals/docker/docker_container_moduler.md @@ -24,12 +24,7 @@ Parameters for connecting to the API are not listed here. They are included in t ``` blkio_weight: description: - - Block IO weight - default: null - -blkio_weight_devices: - description: - - List of C(DEVICE_NAME:WEIGHT) pairs + - Block IO (relative weight), between 10 and 1000. default: null capabilities: @@ -37,16 +32,6 @@ capabilities: - List of capabilities to add to the container. default: null -capabilities_drop: - description: - - List of capabilities to remove from the container - default: null - -cgroup_parent: - description: - - Optional parent cgroup for the container - default: null - command: description: - Command or list of commands to execute in the container when it starts. @@ -54,12 +39,12 @@ command: cpu_period: description: - - Limit CPU CFS (Completely Fair Scheduler) period. Expressed in milliseconds. + - Limit CPU CFS (Completely Fair Scheduler) period default: 0 cpu_quota: description: - - Limit CPU CFS (Completely Fair Scheduler) quota. Expressed in milliseconds. + - Limit CPU CFS (Completely Fair Scheduler) quota default: 0 cpuset_cpus: @@ -74,8 +59,8 @@ cpuset_mems: cpu_shares: description: - - CPU shares. Integer value. - default: 0 + - CPU shares (relative weight). + default: null detach: description: @@ -85,27 +70,8 @@ detach: devices: description: - - List of host devices to add to the container - default: null - -device_read_bps - description: - - List. Limit read rate (bytes per second) from a device in the format C(/dev/sda:1mb) - default: null - -device_read_iops: - description: - - List. 
Limit read rate (IO per second) from a device in the format C(/dev/sda:1000) - default: null - -device_write_bps: - description: - - List. Limit write rate (bytes per second) to a device in the foramt C(/dev/sda:1mb) - default: null - -device_write_iops: - description: - - List. Limit write rate (IO per second) to a device C(/dev/sda:1000) + - List of host device bindings to add to the container. Each binding is a mapping expressed + in the format: <path_on_host>:<path_in_container>:<cgroup_permissions> default: null dns_servers: @@ -118,14 +84,14 @@ dns_search_domains: - List of custom DNS search domains. default: null -env_vars: +env: description: - Dictionary of key,value pairs. default: null entrypoint: description: - - Overwrite the default ENTRYPOINT of the image. + - String or list of commands that overwrite the default ENTRYPOINT of the image. default: null etc_hosts: @@ -140,6 +106,8 @@ exposed_ports: If the port is already exposed using EXPOSE in a Dockerfile, it does not need to be exposed again. default: null + aliases: + - exposed force_kill: description: @@ -149,7 +117,7 @@ force_kill: groups: description: - - List of additional groups to join. + - List of additional group names and/or IDs that the container process will run as. default: null hostname: @@ -191,8 +159,8 @@ keep_volumes: kill_signal: description: - - Signal used to kill a running container when state is absent. - default: KILL + - Override default signal used to kill a running container. + default null: kernel_memory: description: @@ -202,7 +170,7 @@ kernel_memory: labels: description: - - Set meta data on the cotnainer. Dictionary of key value paris: + - Dictionary of key value pairs. default: null links: @@ -253,7 +221,7 @@ memory_swap: Number is a positive integer. Unit can be one of b, k, m, or g. default: 0 -memory_swapiness: +memory_swappiness: description: - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. default: 0 @@ -274,10 +242,10 @@ network_mode: - none default: null -net_alias: - description: - - Add network scoped alias to the container. - default: null +oom_killer: + desription: + - Whether or not to disable OOM Killer for the container. + default: false paused: description: @@ -302,6 +270,8 @@ published_ports: - Container ports must be exposed either in the Dockerfile or via the C(expose) option. - A value of ALL will publish all exposed container ports to random host ports, ignoring any other mappings. + aliases: + - ports read_only: description: @@ -375,8 +345,8 @@ state: stop_signal: description: - - Signal used to stop the container. - default: SIGINT + - Override default signal used to stop the container. + default: null stop_timeout: description: @@ -418,6 +388,11 @@ volumes: private label for the volume. default: null +volume_driver: + description: + - The container's volume driver. + default: none + volumes_from: description: - List of container names or Ids to get volumes from. 
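To make the parameter list above more concrete, here is a minimal sketch of how a few of the proposed options might be declared in a module's argument spec. This is illustrative only -- the proposal does not prescribe an implementation, and the `name`/`image` parameters plus the `state` choices shown are assumptions rather than items taken from the draft above:

```python
# Illustrative sketch only: a possible argument_spec fragment covering a few of
# the options proposed above. Option names mirror the draft; 'name', 'image'
# and the 'state' choices are assumed for the sake of a runnable example.
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),          # assumed
            image=dict(type='str'),                        # assumed
            state=dict(type='str', default='started',      # choices assumed
                       choices=['absent', 'present', 'started', 'stopped']),
            exposed_ports=dict(type='list', aliases=['exposed']),
            published_ports=dict(type='list', aliases=['ports']),
            volumes=dict(type='list'),
            volume_driver=dict(type='str'),
            volumes_from=dict(type='list'),
            stop_signal=dict(type='str'),
            kill_signal=dict(type='str'),
        ),
        supports_check_mode=True,
    )
    # A real implementation would talk to the Docker API here; this sketch
    # just echoes the validated parameters back.
    module.exit_json(changed=False, params=module.params)


if __name__ == '__main__':
    main()
```
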
From f389709558f7504271e941d6b5e32285ce18bd3a Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Fri, 4 Mar 2016 13:55:23 -0500 Subject: [PATCH 0849/1113] Updating docker_network module proposal --- .../proposals/docker/docker_network_module.md | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/docs/proposals/docker/docker_network_module.md b/docs/proposals/docker/docker_network_module.md index bf86e672e2b..92e085c29fa 100644 --- a/docs/proposals/docker/docker_network_module.md +++ b/docs/proposals/docker/docker_network_module.md @@ -25,6 +25,11 @@ driver: - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. default: bridge +driver_options: + description: + - Dictionary of network settings. Consult docker docs for valid options and values. + default: null + force: description: - With state 'absent' forces disconnecting all containers from the network prior to deleting the network. With @@ -37,16 +42,21 @@ incremental: Use incremental to leave existing containers connected. default: false +ipam_driver: + description: + - Specifiy an IPAM driver. + default: null + +ipam_options: + description: + - Dictionary of IPAM options. + default: null + network_name: description: - Name of the network to operate on. default: null required: true - -options: - description: - - Dictionary of network settings. Consult docker docs for valid options and values. - default: null state: description: @@ -117,4 +127,4 @@ state: < results from docker inspect for the affected network > } } -``` \ No newline at end of file +``` From 981f451f0eb4137839681cf552b65a84b52989c7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 4 Mar 2016 14:01:59 -0500 Subject: [PATCH 0850/1113] Adding fedora 23 to the Docker tests on travis --- .travis.yml | 1 + test/utils/docker/fedora23/Dockerfile | 44 +++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 test/utils/docker/fedora23/Dockerfile diff --git a/.travis.yml b/.travis.yml index 8926a10afb4..f7b6d0fa74a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,6 +15,7 @@ matrix: - env: TARGET=sanity TOXENV=py35 python: 3.5 - env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" + - env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=ubuntu1404 addons: apt: diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile new file mode 100644 index 00000000000..f318224d907 --- /dev/null +++ b/test/utils/docker/fedora23/Dockerfile @@ -0,0 +1,44 @@ +# Latest version of centos +FROM fedora:23 +RUN dnf -y update; dnf clean all +RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ +rm -f /lib/systemd/system/multi-user.target.wants/*; \ +rm -f /etc/systemd/system/*.wants/*; \ +rm -f /lib/systemd/system/local-fs.target.wants/*; \ +rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ +rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ +rm -f /lib/systemd/system/basic.target.wants/*; \ +rm -f /lib/systemd/system/anaconda.target.wants/*; +RUN dnf -y install \ + dbus-python \ + file \ + findutils \ + git \ + make \ + mercurial \ + procps \ + PyYAML \ + python-coverage \ + python2-dnf \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + 
python-virtualenv \ + rubygems \ + subversion \ + sudo \ + tar \ + unzip \ + which \ + yum +RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts +VOLUME /sys/fs/cgroup /run /tmp +ENV container=docker +CMD ["/usr/sbin/init"] From 8867d73420c39b3147e8d1d6910628fe8da99891 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 4 Mar 2016 14:41:35 -0500 Subject: [PATCH 0851/1113] reject extraneous data passed to mode strictly permissions are allowed, file type info should not be passed in alternate fixes #14771 --- lib/ansible/module_utils/basic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8d5963a1f02..c63cb7c84ad 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -890,6 +890,10 @@ class AnsibleModule(object): msg="mode must be in octal or symbolic form", details=str(e)) + if mode != stat.S_IMODE(mode): + # prevent mode from having extra info orbeing invalid long number + self.fail_json(path=path, msg="Invalid mode supplied", details=str(e)) + prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: From 62ac5c047e29f6a5ecce4ef648d603aef27a28e4 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 4 Mar 2016 14:44:03 -0500 Subject: [PATCH 0852/1113] clarified message --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c63cb7c84ad..26c17154b0e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -892,7 +892,7 @@ class AnsibleModule(object): if mode != stat.S_IMODE(mode): # prevent mode from having extra info orbeing invalid long number - self.fail_json(path=path, msg="Invalid mode supplied", details=str(e)) + self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode) prev_mode = stat.S_IMODE(path_stat.st_mode) From ca62bc5db37b574f40b9e9e8ca9f5624fe5a0c68 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 4 Mar 2016 11:21:43 -0800 Subject: [PATCH 0853/1113] Add tests for connection plugins. 
--- test/integration/Makefile | 16 +++++++++- test/integration/test_connection.inventory | 31 +++++++++++++++++++ test/integration/test_connection.yml | 35 ++++++++++++++++++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 test/integration/test_connection.inventory create mode 100644 test/integration/test_connection.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index 7a496f65de2..a685cb8a6f3 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -21,8 +21,9 @@ MYTMPDIR = $(shell mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) +EUID := $(shell id -u -r) -all: setup parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log +all: setup parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection setup: rm -rf $(TEST_DIR) @@ -72,6 +73,19 @@ environment: setup non_destructive: setup ansible-playbook non_destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_connection: setup +ifeq ($(EUID),0) + # Test connection plugins when running as root (lang unspecified). + ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build' $(TEST_FLAGS) + # Test connection plugins when running as root (lang=C). + LC_ALL=C LANG=C ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build' $(TEST_FLAGS) +else + # Test connection plugins when not running as root (lang unspecified). + ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build !chroot' $(TEST_FLAGS) + # Test connection plugins when not running as root (lang=C). 
+ LC_ALL=C LANG=C ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build !chroot' $(TEST_FLAGS) +endif + destructive: setup ansible-playbook destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/test/integration/test_connection.inventory b/test/integration/test_connection.inventory new file mode 100644 index 00000000000..b5edb558952 --- /dev/null +++ b/test/integration/test_connection.inventory @@ -0,0 +1,31 @@ +[local] +local-pipelining ansible_ssh_pipelining=true +local-no-pipelining ansible_ssh_pipelining=false +[local:vars] +ansible_host=localhost +ansible_connection=local + +[chroot] +chroot-pipelining ansible_ssh_pipelining=true +chroot-no-pipelining ansible_ssh_pipelining=false +[chroot:vars] +ansible_host=/ +ansible_connection=chroot + +[ssh] +ssh-pipelining ansible_ssh_pipelining=true +ssh-no-pipelining ansible_ssh_pipelining=false +[ssh:vars] +ansible_host=localhost +ansible_connection=ssh + +[paramiko_ssh] +paramiko_ssh-pipelining ansible_ssh_pipelining=true +paramiko_ssh-no-pipelining ansible_ssh_pipelining=false +[paramiko_ssh:vars] +ansible_host=localhost +ansible_connection=paramiko_ssh + +[skip-during-build:children] +ssh +paramiko_ssh diff --git a/test/integration/test_connection.yml b/test/integration/test_connection.yml new file mode 100644 index 00000000000..5b3d4f1bc3c --- /dev/null +++ b/test/integration/test_connection.yml @@ -0,0 +1,35 @@ +- hosts: all + gather_facts: no + serial: 1 + tasks: + + ### raw with unicode arg and output + + - name: raw with unicode arg and output + raw: echo 汉语 + register: command + - name: check output of raw with unicode arg and output + assert: { that: "'汉语' in command.stdout" } + + ### copy local file with unicode filename and content + + - name: create local file with unicode filename and content + local_action: lineinfile dest=/tmp/ansible-local-汉语 create=true line=汉语 + - name: remove remote file with unicode filename and content + file: path=/tmp/ansible-remote-汉语 state=absent + - name: copy local file with unicode filename and content + copy: src=/tmp/ansible-local-汉语 dest=/tmp/ansible-remote-汉语 + + ### fetch remote file with unicode filename and content + + - name: remove local file with unicode filename and content + local_action: file path=/tmp/ansible-local-汉语 state=absent + - name: fetch remote file with unicode filename and content + fetch: src=/tmp/ansible-remote-汉语 dest=/tmp/ansible-local-汉语 fail_on_missing=true validate_checksum=true flat=true + + ### remove local and remote temp files + + - name: remove local temp file + local_action: file path=/tmp/ansible-local-汉语 state=absent + - name: remove remote temp file + file: path=/tmp/ansible-remote-汉语 state=absent From 49892b287b4d2e1c2ccd058fae1eafd5cf6b493d Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 4 Mar 2016 18:14:52 -0800 Subject: [PATCH 0854/1113] Use vars to eliminate redundant test commands. 
--- test/integration/Makefile | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index a685cb8a6f3..5e937618d49 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -73,19 +73,18 @@ environment: setup non_destructive: setup ansible-playbook non_destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -test_connection: setup -ifeq ($(EUID),0) - # Test connection plugins when running as root (lang unspecified). - ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build' $(TEST_FLAGS) - # Test connection plugins when running as root (lang=C). - LC_ALL=C LANG=C ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build' $(TEST_FLAGS) -else - # Test connection plugins when not running as root (lang unspecified). - ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build !chroot' $(TEST_FLAGS) - # Test connection plugins when not running as root (lang=C). - LC_ALL=C LANG=C ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build !chroot' $(TEST_FLAGS) +# Skip connection plugins which require root when not running as root. +ifneq ($(EUID),0) +TEST_CONNECTION_FILTER := !chroot endif +# Connection plugin test command to repeat with each locale setting. +TEST_CONNECTION_CMD = $(1) ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build $(TEST_CONNECTION_FILTER)' $(TEST_FLAGS) + +test_connection: setup + $(call TEST_CONNECTION_CMD) + $(call TEST_CONNECTION_CMD, LC_ALL=C LANG=C) + destructive: setup ansible-playbook destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) From 130c995208d77ee9703e03d5fcba9ac7cb635588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Fri, 4 Mar 2016 11:20:53 +0100 Subject: [PATCH 0855/1113] proposal: ignore_checkmode is a better name as suggested by @bcoca --- docs/proposals/rename_always_run.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/proposals/rename_always_run.md b/docs/proposals/rename_always_run.md index 6adcd508f49..e3c05d7a8d7 100644 --- a/docs/proposals/rename_always_run.md +++ b/docs/proposals/rename_always_run.md @@ -1,4 +1,4 @@ -# Rename always_run to checkmode_run +# Rename always_run to ignore_checkmode *Author*: René Moser <@resmo> @@ -25,10 +25,10 @@ You have a conditional but also a word that says `always`. 
This is a conflict in ## Solution Proposal -Deprecate `always_run` by rename it to `checkmode_run`: +Deprecate `always_run` by rename it to `ignore_checkmode`: ``` - shell: dangerous_cleanup.sh when: cleanup == "yes" - checkmode_run: yes + ignore_checkmode: yes ``` From 299c18d70009ee7a6d79fe99e5cc9444cefc790d Mon Sep 17 00:00:00 2001 From: Kishin Yagami <k.yagami.suou@gmail.com> Date: Sat, 5 Mar 2016 20:15:04 +0900 Subject: [PATCH 0856/1113] Support strategy_plugins setting in a configuration file --- docsite/rst/developing_plugins.rst | 1 + docsite/rst/intro_configuration.rst | 14 ++++++++++++++ examples/ansible.cfg | 1 + lib/ansible/constants.py | 1 + lib/ansible/plugins/__init__.py | 2 +- 5 files changed, 18 insertions(+), 1 deletion(-) diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst index 2b0c2344d01..125163407a6 100644 --- a/docsite/rst/developing_plugins.rst +++ b/docsite/rst/developing_plugins.rst @@ -112,6 +112,7 @@ to /usr/share/ansible/plugins, in a subfolder for each plugin type:: * connection_plugins * filter_plugins * vars_plugins + * strategy_plugins To change this path, edit the ansible configuration file. diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 4272ef7fb9b..190c0cf6be3 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -600,6 +600,20 @@ Additional paths can be provided separated by colon characters, in the same way Roles will be first searched for in the playbook directory. Should a role not be found, it will indicate all the possible paths that were searched. +.. _strategy_plugins: + +strategy_plugins +================== + +Strategy plugin allow users to change the way in which Ansible runs tasks on targeted hosts. + +This is a developer-centric feature that allows low-level extensions around Ansible to be loaded from +different locations:: + + strategy_plugins = ~/.ansible/plugins/strategy_plugins/:/usr/share/ansible_plugins/strategy_plugins + +Most users will not need to use this feature. See :doc:`developing_plugins` for more details + .. 
_sudo_exe: sudo_exe diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 91ef70b77a5..48628441fb0 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -141,6 +141,7 @@ #vars_plugins = /usr/share/ansible/plugins/vars #filter_plugins = /usr/share/ansible/plugins/filter #test_plugins = /usr/share/ansible/plugins/test +#strategy_plugins = /usr/share/ansible/plugins/strategy # by default callbacks are not loaded for /bin/ansible, enable this if you # want, for example, a notification or logging callback to also apply to diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6623b8f0c34..2ca0173438d 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -212,6 +212,7 @@ DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', ' DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispath=True) DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispath=True) DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispath=True) +DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', ispath=True) DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') # cache CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 139e5a7d612..f0fa0ec62bd 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -444,7 +444,7 @@ fragment_loader = PluginLoader( strategy_loader = PluginLoader( 'StrategyModule', 'ansible.plugins.strategy', - None, + C.DEFAULT_STRATEGY_PLUGIN_PATH, 'strategy_plugins', required_base_class='StrategyBase', ) From db2a0ae2551268efac6e39edd2e587c38e7084d6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 10:58:12 -0500 Subject: [PATCH 0857/1113] Testing adding -j2 to the run_tests.sh script to speed up docker tests --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 33ac7338d81..87324da2219 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -10,6 +10,6 @@ if [ "${TARGET}" = "sanity" ]; then else docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} - docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)' + docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make -j2)' docker kill $(cat /tmp/cid_${TARGET}) fi From e5844ee03dba15733dc0be035c60fd09a5828407 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 11:50:10 -0500 Subject: [PATCH 0858/1113] Revert "Testing adding -j2 to the run_tests.sh script to speed up docker tests" This reverts commit db2a0ae2551268efac6e39edd2e587c38e7084d6, as it does not add any speed to the tests on Travis. 
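Returning to the strategy plugin support added in patch 0856 above: the loader change requires a class literally named `StrategyModule` that derives from `StrategyBase`, discovered via the new `DEFAULT_STRATEGY_PLUGIN_PATH`. Below is a minimal, hypothetical sketch of such a plugin; the file name `my_linear.py` and the idea of delegating to the built-in linear strategy are illustrative assumptions, not part of the patch.

```python
# Hypothetical file: ~/.ansible/plugins/strategy/my_linear.py
# (one of the defaults in DEFAULT_STRATEGY_PLUGIN_PATH above).
# The strategy_loader requires the class name 'StrategyModule' and a
# StrategyBase ancestor; subclassing the built-in linear strategy
# satisfies both.
from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule


class StrategyModule(LinearStrategyModule):
    """Behaves like the stock 'linear' strategy; a real plugin would
    override run() or other hooks."""
    pass
```

A play could then select it with `strategy: my_linear`, assuming (as with other plugin types) that the plugin name is taken from the file name.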
--- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 87324da2219..33ac7338d81 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -10,6 +10,6 @@ if [ "${TARGET}" = "sanity" ]; then else docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} - docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make -j2)' + docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)' docker kill $(cat /tmp/cid_${TARGET}) fi From c63287393602ffe76f85af64abefe0140337635f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 13:47:23 -0500 Subject: [PATCH 0859/1113] Adding centos 6 to the travis mix --- .travis.yml | 1 + test/utils/docker/centos6/Dockerfile | 33 ++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 test/utils/docker/centos6/Dockerfile diff --git a/.travis.yml b/.travis.yml index f7b6d0fa74a..3c3b0c22d35 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,7 @@ matrix: python: 3.4 - env: TARGET=sanity TOXENV=py35 python: 3.5 + - env: TARGET=centos6 - env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=ubuntu1404 diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile new file mode 100644 index 00000000000..d96267b1deb --- /dev/null +++ b/test/utils/docker/centos6/Dockerfile @@ -0,0 +1,33 @@ +# Latest version of centos +FROM centos:centos6 +RUN yum -y update; yum clean all; +RUN yum -y install \ + epel-release \ + file \ + git \ + make \ + mercurial \ + rubygems \ + sed \ + subversion \ + sudo \ + unzip \ + which +RUN yum -y install \ + PyYAML \ + python-coverage \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + python-virtualenv +RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN /bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts +#VOLUME /sys/fs/cgroup /run /tmp +ENV container=docker +CMD ["/sbin/init"] From b72cd3a6d218579f102007eba7e4f9165764042c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 14:35:49 -0500 Subject: [PATCH 0860/1113] Adding fedora rawhide to the travis mix --- .travis.yml | 1 + test/utils/docker/fedora-rawhide/Dockerfile | 46 +++++++++++++++++++++ test/utils/docker/fedora23/Dockerfile | 1 + 3 files changed, 48 insertions(+) create mode 100644 test/utils/docker/fedora-rawhide/Dockerfile diff --git a/.travis.yml b/.travis.yml index 3c3b0c22d35..35ff691f400 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,7 @@ matrix: - env: TARGET=centos6 - env: TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" + - env: TARGET=fedora-rawhide TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=ubuntu1404 addons: apt: diff --git a/test/utils/docker/fedora-rawhide/Dockerfile b/test/utils/docker/fedora-rawhide/Dockerfile new file mode 100644 index 
00000000000..6ad09779aa8 --- /dev/null +++ b/test/utils/docker/fedora-rawhide/Dockerfile @@ -0,0 +1,46 @@ +# Latest version of fedora rawhide +FROM fedora:rawhide +RUN dnf -y update; dnf clean all +RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ +rm -f /lib/systemd/system/multi-user.target.wants/*; \ +rm -f /etc/systemd/system/*.wants/*; \ +rm -f /lib/systemd/system/local-fs.target.wants/*; \ +rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ +rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ +rm -f /lib/systemd/system/basic.target.wants/*; \ +rm -f /lib/systemd/system/anaconda.target.wants/*; +RUN dnf -y install \ + dbus-python \ + file \ + findutils \ + git \ + glibc-locale-source \ + make \ + mercurial \ + procps \ + PyYAML \ + python-coverage \ + python2-dnf \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + python-virtualenv \ + rubygems \ + subversion \ + sudo \ + tar \ + unzip \ + which \ + yum +RUN localedef --quiet -c -i en_US -f UTF-8 en_US.UTF-8 +RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts +VOLUME /sys/fs/cgroup /run /tmp +ENV container=docker +CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index f318224d907..be0b3a48410 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -13,6 +13,7 @@ RUN dnf -y install \ dbus-python \ file \ findutils \ + glibc-locale-source \ git \ make \ mercurial \ From 3a1944edc15de9f13343cb7368fb97fc487a5d77 Mon Sep 17 00:00:00 2001 From: Jay Jahns <jjahns@vmware.com> Date: Sat, 5 Mar 2016 11:50:51 -0800 Subject: [PATCH 0861/1113] add find_vm_by_name function to vmware utils --- lib/ansible/module_utils/vmware.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index 2f895801466..a0999c05442 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -99,6 +99,15 @@ def find_hostsystem_by_name(content, hostname): return None +def find_vm_by_name(content, vm_name): + + vms = get_all_objs(content, [vim.VirtualMachine]) + for vm in vms: + if vm.name == vm_name: + return vm + return None + + def vmware_argument_spec(): return dict( From b97a98f69e0566562e30cd510d1c3da6c0aff60c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 15:45:37 -0500 Subject: [PATCH 0862/1113] Trying to fix up the fedora23 docker image --- test/utils/docker/fedora23/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index be0b3a48410..4c05a3be2c2 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -13,7 +13,6 @@ RUN dnf -y install \ dbus-python \ file \ findutils \ - glibc-locale-source \ git \ make \ mercurial \ @@ -37,6 +36,8 @@ RUN dnf -y install \ unzip \ which \ yum +RUN localedef -f ISO-8859-1 -i pt_BR pt_BR +RUN localedef -f ISO-8859-1 -i es_MX es_MX RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts 
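The `find_vm_by_name` helper added to `module_utils/vmware.py` in patch 0861 above follows the same lookup pattern as the existing `find_hostsystem_by_name`. A rough usage sketch from a hypothetical module is shown below; `connect_to_api` is assumed to come from the same utils file (it is not part of the hunk shown), and the explicit import style is simplified for readability.

```python
# Hypothetical module sketch using the find_vm_by_name() helper added above.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (connect_to_api, find_vm_by_name,
                                         vmware_argument_spec)


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(name=dict(required=True, type='str'))
    module = AnsibleModule(argument_spec=argument_spec)

    content = connect_to_api(module)          # vSphere service content (assumed helper)
    vm = find_vm_by_name(content, module.params['name'])
    if vm is None:
        module.fail_json(msg="VM %s not found" % module.params['name'])
    module.exit_json(changed=False, vm_name=vm.name)


if __name__ == '__main__':
    main()
```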
From 75b9c7db14aef78bdadc6536fe7def4853b158f6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sat, 5 Mar 2016 17:39:43 -0500 Subject: [PATCH 0863/1113] moved hardcoded settings from doccli to constants --- lib/ansible/cli/doc.py | 10 ++++------ lib/ansible/constants.py | 3 +++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 265b1c9a3fc..950e3d78fc1 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -26,6 +26,7 @@ import textwrap from ansible.compat.six import iteritems +from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.plugins import module_loader from ansible.cli import CLI @@ -41,9 +42,6 @@ except ImportError: class DocCLI(CLI): """ Vault command line class """ - BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt') - IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"] - def __init__(self, args): super(DocCLI, self).__init__(args) @@ -96,7 +94,7 @@ class DocCLI(CLI): display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue - if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): + if any(filename.endswith(x) for x in C.BLACKLIST_EXTS): continue try: @@ -143,11 +141,11 @@ class DocCLI(CLI): continue elif os.path.isdir(module): self.find_modules(module) - elif any(module.endswith(x) for x in self.BLACKLIST_EXTS): + elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module.startswith('__'): continue - elif module in self.IGNORE_FILES: + elif module in C.IGNORE_FILES: continue elif module.startswith('_'): fullpath = '/'.join([path,module]) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 2ca0173438d..d38dde6eb4e 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -302,3 +302,6 @@ VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 TREE_DIR = None LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1']) +# module search +BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm', '.md', '.txt') +IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES", "test-docs.sh"] From 56670bd15000b2d383dff77eaa962d06d4ed95c3 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sat, 5 Mar 2016 18:36:28 -0500 Subject: [PATCH 0864/1113] More tweaks for fedora on docker --- test/utils/docker/fedora23/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index 4c05a3be2c2..1bfd7279045 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -13,6 +13,7 @@ RUN dnf -y install \ dbus-python \ file \ findutils \ + glibc-common \ git \ make \ mercurial \ From 43309c0f5d0d0fc800e19afd11017279602a5fb7 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sun, 6 Mar 2016 11:24:02 +0100 Subject: [PATCH 0865/1113] Fix various mispellings --- docsite/rst/porting_guide_2.0.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 44a920f984c..a112d8ebcbf 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -186,14 +186,14 @@ Here are some corner cases encountered when updating, these are mostly caused by The `port` variable is reserved as a play/task directive for 
overriding the connection port, in previous versions this got conflated with a variable named `port` and was usable later in the play, this created issues if a host tried to reconnect or was using a non caching connection. Now it will be correctly identified as a directive and the `port` variable - will appear as undefined, this now forces the use of non conflicting names and removes ambiguity when adding settings and varaibles to a role invocation.. + will appear as undefined, this now forces the use of non conflicting names and removes ambiguity when adding settings and variables to a role invocation. * Bare operations on `with_`:: with_items: var1 + var2 - An issue with the 'bare variable' features, which was supposed only tempate a single variable without the need of braces ({{ )}}, would in some versions of Ansible template full expressions. - Now you need to use proper templating and braces for all expressions everywhere except condtionals (`when`):: + An issue with the 'bare variable' features, which was supposed only template a single variable without the need of braces ({{ )}}, would in some versions of Ansible template full expressions. + Now you need to use proper templating and braces for all expressions everywhere except conditionals (`when`):: with_items: "{{var1 + var2}}" From 762c99f77c7f751ee90377c3199aa4745ac8a8c3 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 6 Mar 2016 07:48:38 -0500 Subject: [PATCH 0866/1113] Fixing centos6 docker image to upgrade jinja2 --- test/utils/docker/centos6/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile index d96267b1deb..6ea062e8438 100644 --- a/test/utils/docker/centos6/Dockerfile +++ b/test/utils/docker/centos6/Dockerfile @@ -25,6 +25,7 @@ RUN yum -y install \ python-pip \ python-setuptools \ python-virtualenv +RUN pip install --upgrade jinja2 RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts From 2ee0c1b175b3a02196d931f318c77a8fa5f14eca Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 6 Mar 2016 08:09:30 -0500 Subject: [PATCH 0867/1113] Fixing centos6 docker image for pycrytpo too --- test/utils/docker/centos6/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile index 6ea062e8438..ebca54ae65f 100644 --- a/test/utils/docker/centos6/Dockerfile +++ b/test/utils/docker/centos6/Dockerfile @@ -4,6 +4,7 @@ RUN yum -y update; yum clean all; RUN yum -y install \ epel-release \ file \ + gcc \ git \ make \ mercurial \ @@ -16,6 +17,7 @@ RUN yum -y install \ RUN yum -y install \ PyYAML \ python-coverage \ + python-devel \ python-httplib2 \ python-jinja2 \ python-keyczar \ @@ -26,6 +28,7 @@ RUN yum -y install \ python-setuptools \ python-virtualenv RUN pip install --upgrade jinja2 +RUN rpm -e --nodeps python-crypto; pip install pycrypto RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts From c0f1e1801bb29c708671de21344eafd40004b0c9 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 6 Mar 2016 08:08:22 -0500 Subject: [PATCH 0868/1113] adds multiline flag to regex test for search and match This commit adds the multiline flag to the regexp search 
and match test plugin. It defaults to re.M = False for backwards compatibility. To use the multiline feature add multiline=True to the test filter {{ config | search('^hostname', multiline=True) }} --- lib/ansible/plugins/test/core.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py index fb9e0fb86e7..641172b91c4 100644 --- a/lib/ansible/plugins/test/core.py +++ b/lib/ansible/plugins/test/core.py @@ -62,26 +62,27 @@ def skipped(*a, **kw): skipped = item.get('skipped', False) return skipped -def regex(value='', pattern='', ignorecase=False, match_type='search'): +def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'): ''' Expose `re` as a boolean filter using the `search` method by default. This is likely only useful for `search` and `match` which already have their own filters. ''' + flags = 0 if ignorecase: - flags = re.I - else: - flags = 0 + flags |= re.I + if multiline: + flags |= re.M _re = re.compile(pattern, flags=flags) _bool = __builtins__.get('bool') return _bool(getattr(_re, match_type, 'search')(value)) -def match(value, pattern='', ignorecase=False): +def match(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.match` returning a boolean ''' - return regex(value, pattern, ignorecase, 'match') + return regex(value, pattern, ignorecase, multiline, 'match') -def search(value, pattern='', ignorecase=False): +def search(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.search` returning a boolean ''' - return regex(value, pattern, ignorecase, 'search') + return regex(value, pattern, ignorecase, multiline, 'search') class TestModule(object): ''' Ansible core jinja2 tests ''' From 40b192d149295653cd7d64796b2dc9241bdfc0fe Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 6 Mar 2016 09:04:24 -0500 Subject: [PATCH 0869/1113] Adding irc notifications to travis config --- .travis.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.travis.yml b/.travis.yml index 35ff691f400..df224937cfe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,3 +31,12 @@ script: - ./test/utils/run_tests.sh after_success: - coveralls +notifications: + irc: + channels: + - "chat.freenode.net#ansible-notices" + on_success: change + on_failure: always + use_notice: true + skip_join: true + nick: ansibletravis From 5aab1589874a69a95c0a897d665ebae5b9f602b4 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 6 Mar 2016 09:14:02 -0500 Subject: [PATCH 0870/1113] removed unused imports --- lib/ansible/module_utils/facts.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 2d3378cb299..39dcd5455a9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -19,10 +19,8 @@ import os import sys import stat import time -import array import shlex import errno -import fcntl import fnmatch import glob import platform From 0e17a6f03672c1403c5dbb41f8c3b6f41e4513c0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 6 Mar 2016 09:55:59 -0500 Subject: [PATCH 0871/1113] Another attempt at fixing random f23 locale failures in docker image --- test/utils/docker/fedora23/Dockerfile | 4 ++-- test/utils/run_tests.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index 
1bfd7279045..50eb8e53514 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -37,8 +37,8 @@ RUN dnf -y install \ unzip \ which \ yum -RUN localedef -f ISO-8859-1 -i pt_BR pt_BR -RUN localedef -f ISO-8859-1 -i es_MX es_MX +RUN localedef -q -f ISO-8859-1 -i pt_BR pt_BR +RUN localedef -q -f ISO-8859-1 -i es_MX es_MX RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 33ac7338d81..d4787da69a0 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -8,7 +8,7 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" != x'py24' ; then tox ; fi if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else - docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} + docker build --pull=true -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)' docker kill $(cat /tmp/cid_${TARGET}) From 7d8b84dae6561c1fc7bdd0a50c4d3f72f683e43c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 6 Mar 2016 10:00:36 -0500 Subject: [PATCH 0872/1113] fix assemble to not pass extra args to copy/file also small refactor to dedupe code fixes https://github.com/ansible/ansible-modules-core/issues/3154 --- lib/ansible/plugins/action/assemble.py | 41 ++++++++++++-------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 4bbecbd25a0..3dc0bad53f5 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -100,7 +100,6 @@ class ActionModule(ActionBase): if boolean(remote_src): result.update(self._execute_module(tmp=tmp, task_vars=task_vars)) return result - elif self._task._role is not None: src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) else: @@ -123,6 +122,22 @@ class ActionModule(ActionBase): dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) diff = {} + + # setup args for running modules + new_module_args = self._task.args.copy() + + # clean assemble specific options + for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']: + if opt in new_module_args: + del new_module_args[opt] + + new_module_args.update( + dict( + dest=dest, + original_basename=os.path.basename(src), + ) + ) + if path_checksum != dest_stat['checksum']: resultant = file(path).read() @@ -135,31 +150,13 @@ class ActionModule(ActionBase): if self._play_context.become and self._play_context.become_user != 'root': self._remote_chmod('a+r', xfered) - # run the copy module - - new_module_args = self._task.args.copy() - new_module_args.update( - dict( - src=xfered, - dest=dest, - original_basename=os.path.basename(src), - ) - ) + new_module_args.update( dict( src=xfered,)) res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp) if diff: res['diff'] = diff result.update(res) - return result else: - new_module_args = self._task.args.copy() - 
new_module_args.update( - dict( - src=xfered, - dest=dest, - original_basename=os.path.basename(src), - ) - ) - result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)) - return result + + return result From 8cfdaa16f437b96e00f249c887573e1bfbf6ec79 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 6 Mar 2016 11:07:12 -0500 Subject: [PATCH 0873/1113] Fixing error in localedef usage for f23 docker image --- test/utils/docker/fedora23/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index 50eb8e53514..3695b9a613b 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -37,8 +37,8 @@ RUN dnf -y install \ unzip \ which \ yum -RUN localedef -q -f ISO-8859-1 -i pt_BR pt_BR -RUN localedef -q -f ISO-8859-1 -i es_MX es_MX +RUN localedef --quiet -f ISO-8859-1 -i pt_BR pt_BR +RUN localedef --quiet -f ISO-8859-1 -i es_MX es_MX RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts From efd02020a299387f7a2f9e31b5550cf7a3bcfb0f Mon Sep 17 00:00:00 2001 From: chouseknecht <chouseknecht@ansible.com> Date: Sun, 6 Mar 2016 11:28:54 -0500 Subject: [PATCH 0874/1113] Updating docker_container module proposal --- ..._moduler.md => docker_container_module.md} | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) rename docs/proposals/docker/{docker_container_moduler.md => docker_container_module.md} (96%) diff --git a/docs/proposals/docker/docker_container_moduler.md b/docs/proposals/docker/docker_container_module.md similarity index 96% rename from docs/proposals/docker/docker_container_moduler.md rename to docs/proposals/docker/docker_container_module.md index 4b826ad5d13..886b510482b 100644 --- a/docs/proposals/docker/docker_container_moduler.md +++ b/docs/proposals/docker/docker_container_module.md @@ -96,8 +96,8 @@ entrypoint: etc_hosts: description: - - List of custom host-to-IP mappings, with each mapping in the format C(host:ip), to be - added to the container's /etc/hosts file. + - Dict of host-to-IP mappings, where each host name is key in the dictionary. Hostname will be added to the + container's /etc/hosts file. default: null exposed_ports: @@ -135,16 +135,6 @@ interactive: - Keep stdin open after a container is launched, even if not attached. default: false -ip_address: - description: - - Container IPv4 address. - default: null - -ipv6_address: - description: - - Container IPv6 address. - default: null - ipc_mode: description: - Set the IPC mode for the container. Can be one of @@ -175,7 +165,7 @@ labels: links: description: - - List of name aliases for linked containers in the format C(redis:myredis) + - List of name aliases for linked containers in the format C(container_name:alias) default: null log_driver: @@ -242,6 +232,12 @@ network_mode: - none default: null +networks: + description: + - Dictionary of networks to which the container will be connected. The dictionary must have a name key (the name of the network). + Optional keys include: aliases (a list of container aliases), and links (a list of links in the format C(container_name:alias)). + default: null + oom_killer: desription: - Whether or not to disable OOM Killer for the container. @@ -363,9 +359,9 @@ tty: - Allocate a psuedo-TTY. 
default: false -ulimit: +ulimits: description: - - List of ulimit options. A ulimit is specified as C(nofile=262144:262144) + - List of ulimit options. A ulimit is specified as C(nofile:262144:262144) default: null user: From d361f3b44ea06789d4611c413f73a2b4efa5aa77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Mon, 7 Mar 2016 07:35:04 +0100 Subject: [PATCH 0875/1113] proposal: re-run handlers --- docs/proposals/re-run-handlers.md | 77 +++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 docs/proposals/re-run-handlers.md diff --git a/docs/proposals/re-run-handlers.md b/docs/proposals/re-run-handlers.md new file mode 100644 index 00000000000..9b5a01df8fa --- /dev/null +++ b/docs/proposals/re-run-handlers.md @@ -0,0 +1,77 @@ +# Proposal: Re-run handlers cli option + +*Author*: René Moser <@resmo> + +*Date*: 07/03/2016 + +- Status: New + +## Motivation + +The most annoying thing users face using ansible in production is running handlers manually after a task failed after a notified handler. + +### Problems + +Handler notifications get lost after a task failed and there is no help from ansible to catch up the notified handlers in a next ansible playbook run. + +~~~yaml +- hosts: localhost + gather_facts: no + tasks: + - name: simple task + shell: echo foo + notify: get msg out + + - name: this tasks fails + fail: msg="something went wrong" + + handlers: + - name: get msg out + shell: echo handler run +~~~ + +Result: + +~~~ +$ ansible-playbook test.yml + +PLAY *************************************************************************** + +TASK [simple task] ************************************************************* +changed: [localhost] + +TASK [this tasks fails] ******************************************************** +fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "something went wrong"} + +NO MORE HOSTS LEFT ************************************************************* + +RUNNING HANDLER [get msg out] ************************************************** + to retry, use: --limit @test.retry + +PLAY RECAP ********************************************************************* +localhost : ok=1 changed=1 unreachable=0 failed=1 +~~~ + +## Solution proposal + +Similar to retry, ansible should provide a way to manully invoke a list of handlers additionaly to the notified handlers in the plays: + +~~~ + $ ansible-playbook test.yml --notify-handlers <handler>,<handler>,<handler> + $ ansible-playbook test.yml --notify-handlers @test.handlers +~~~ + +Example: + +~~~ + $ ansible-playbook test.yml --notify-handlers "get msg out" +~~~ + +The stdout of a failed play should provide an example how to run notified handlers in the next run: + +~~~ +... 
+RUNNING HANDLER [get msg out] ************************************************** + to retry, use: --limit @test.retry --notify-handlers @test.handlers +~~~ + From 66cb77107b60156ec4085300e6bd151a283a709c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Mon, 7 Mar 2016 11:03:53 +0100 Subject: [PATCH 0876/1113] proposal: publish/subscribe for handlers --- docs/proposals/publish-subscribe.md | 205 ++++++++++++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 docs/proposals/publish-subscribe.md diff --git a/docs/proposals/publish-subscribe.md b/docs/proposals/publish-subscribe.md new file mode 100644 index 00000000000..f31d2dca33d --- /dev/null +++ b/docs/proposals/publish-subscribe.md @@ -0,0 +1,205 @@ +# Publish / Subscribe for Handlers + +*Author*: René Moser <@resmo> + +*Date*: 07/03/2016 + +## Motivation + +In some use cases a publish/subscribe kind of event to run a handler is more convenient, e.g. restart services after replacing SSL certs. + +However, ansible does not provide a built-in way to handle it yet. + + +### Problem + +If your SSL cert changes, you usually have to reload/restart services to use the new certificate. + +However, If you have a ssl role or a generic ssl play, you usually don't want to add specific handlers to it. +Instead it would be much more convenient to use a publish/subscribe kind of paradigm in the roles where the services are configured in. + +The way we implemented it currently: + +I use notify to set a fact where later (in different plays) we act on a fact using notify again. + +~~~yaml +--- +- hosts: localhost + gather_facts: no + tasks: + - name: copy an ssl cert + shell: echo cert has been changed + notify: publish ssl cert change + handlers: + - name: publish ssl cert change + set_fact: + ssl_cert_changed: true + +- hosts: localhost + gather_facts: no + tasks: + - name: subscribe for ssl cert change + shell: echo cert changed + notify: service restart one + when: ssl_cert_changed is defined and ssl_cert_changed + handlers: + - name: service restart one + shell: echo service one restarted + +- hosts: localhost + gather_facts: no + tasks: + - name: subscribe for ssl cert change + shell: echo cert changed + when: ssl_cert_changed is defined and ssl_cert_changed + notify: service restart two + handlers: + - name: service restart two + shell: echo service two restarted +~~~ + +However, this looks like a workaround of a feature that ansible should provide in a much cleaner way. 
+ +## Approaches + +### Approach 1: + +Provide new `subscribe` keyword on handlers: + +~~~yaml +- hosts: localhost + gather_facts: no + tasks: + - name: copy an ssl cert + shell: echo cert has been changed + + +- hosts: localhost + gather_facts: no + handlers: + - name: service restart one + shell: echo service one restarted + subscribe: copy an ssl cert + + +- hosts: localhost + gather_facts: no + handlers: + - name: service restart two + shell: echo service two restarted + subscribe: copy an ssl cert +~~~ + +### Approach 2: + +Provide new `subscribe` on handlers and `publish` keywords in tasks: + +~~~yaml +- hosts: localhost + gather_facts: no + tasks: + - name: copy an ssl cert + shell: echo cert has been changed + publish: yes + + +- hosts: localhost + gather_facts: no + handlers: + - name: service restart one + shell: echo service one restarted + subscribe: copy an ssl cert + + +- hosts: localhost + gather_facts: no + handlers: + - name: service restart two + shell: echo service two restarted + subscribe: copy an ssl cert +~~~ + +### Approach 3: + +Provide new `subscribe` module: + +A subscribe module could consume the results of a task by name, optionally the value to react on could be specified (default: `changed`) + +~~~yaml +- hosts: localhost + gather_facts: no + tasks: + - name: copy an ssl cert + shell: echo cert has been changed + + +- hosts: localhost + gather_facts: no + tasks: + - subscribe: + name: copy an ssl cert + notify: service restart one + handlers: + - name: service restart one + shell: echo service one restarted + + +- hosts: localhost + gather_facts: no + tasks: + - subscribe: + name: copy an ssl cert + react_on: changed + notify: service restart two + handlers: + - name: service restart two + shell: echo service two restarted +~~~ + + +### Approach 4: + +Provide new `subscribe` module (same as Approach 3) and `publish` keyword: + +~~~yaml +- hosts: localhost + gather_facts: no + tasks: + - name: copy an ssl cert + shell: echo cert has been changed + publish: yes + + +- hosts: localhost + gather_facts: no + tasks: + - subscribe: + name: copy an ssl cert + notify: service restart one + handlers: + - name: service restart one + shell: echo service one restarted + + +- hosts: localhost + gather_facts: no + tasks: + - subscribe: + name: copy an ssl cert + notify: service restart two + handlers: + - name: service restart two + shell: echo service two restarted +~~~ + +### Clarifications about role dependencies and publish + +When using service roles having the subscription handlers and the publish task (e.g. cert change) is defined in a depended role (SSL role) only the first service role running the "cert change" task as dependency will trigger the publish. + +In any other service role in the playbook having "SSL role" as dependency, the task won't be `changed` anymore. + +Therefore a once published "message" should not be overwritten or so called "unpublished" by running the same task in a followed role in the playbook. + +## Conclusion + +Feedback is requested to improve any of the above approaches, or provide further approaches to solve this problem. 
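A hypothetical playbook illustrating the role-dependency clarification above (the role names are made up, not part of the proposal): both service roles declare the SSL role as a dependency, but only the first dependency run can report the cert task as changed, so the published event has to remain visible to the later role's subscribed handlers.

~~~yaml
# Illustrative only; service_one and service_two each list the ssl role
# as a dependency in their meta/main.yml.
- hosts: webservers
  roles:
    - service_one   # ssl dependency runs here; cert task is "changed" and publishes
    - service_two   # ssl dependency is deduplicated/unchanged here, nothing new is
                    # published, yet this role's subscribed handlers must still fire
~~~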
From 8b4ebd8e4a654f800fb0a4caab2a302f471bc034 Mon Sep 17 00:00:00 2001 From: John Barker <john@johnrbarker.com> Date: Mon, 7 Mar 2016 12:49:41 +0000 Subject: [PATCH 0877/1113] Make it easier to read error message --- lib/ansible/playbook/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index 7725b5c3c9b..b9eafe44275 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -414,7 +414,7 @@ class Base: def _validate_variable_keys(ds): for key in ds: if not isidentifier(key): - raise TypeError("%s is not a valid variable name" % key) + raise TypeError("'%s' is not a valid variable name" % key) try: if isinstance(ds, dict): From 25e9b5788bb2c43743a06f877f34c2336d19338b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Mar 2016 09:48:21 -0500 Subject: [PATCH 0878/1113] add per item diff handling fixes #14843 --- lib/ansible/executor/process/result.py | 2 ++ lib/ansible/plugins/strategy/__init__.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 7c75bbdfc21..92e43485f59 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -115,6 +115,8 @@ class ResultProcess(multiprocessing.Process): self._send_result(('v2_playbook_item_on_skipped', result)) else: self._send_result(('v2_playbook_item_on_ok', result)) + if 'diff' in result._result: + self._send_result(('v2_on_file_diff', result)) continue clean_copy = strip_internal_keys(result._result) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 8d40aaaefeb..7a948e78d7e 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -331,6 +331,9 @@ class StrategyBase: self._variable_manager.set_host_facts(target_host, facts) elif result[0].startswith('v2_playbook_item') or result[0] == 'v2_playbook_retry': self._tqm.send_callback(result[0], result[1]) + elif result[0] == 'v2_on_file_diff': + if self._diff: + self._tqm.send_callback('v2_on_file_diff', result[1]) else: raise AnsibleError("unknown result message received: %s" % result[0]) From 299d93f6e9b3c5959ccb8c5360dfe0d81cde034d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 7 Mar 2016 13:02:16 -0500 Subject: [PATCH 0879/1113] Updating unit tests for PlayIterator This knowingly introduces a broken test, planning to fix that later. 
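Stepping back to the per-item diff handling added in patch 0878 above: the result-process change forwards a `v2_on_file_diff` callback when a loop item's result carries a `diff` key, and the strategy only emits it when `--diff` is in effect. A hypothetical looped task of the kind that benefits might look like this (names and paths are illustrative):

~~~yaml
# Illustrative looped copy task; with the change above, --diff output is
# now emitted for each item instead of being dropped for looped results.
- name: deploy config fragments
  copy:
    content: "{{ item.content }}"
    dest: "/etc/app/{{ item.name }}.conf"
  with_items:
    - { name: web, content: "listen 80\n" }
    - { name: api, content: "listen 8080\n" }
~~~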
--- lib/ansible/executor/play_iterator.py | 31 ++- lib/ansible/playbook/base.py | 2 + test/units/executor/test_play_iterator.py | 255 +++++++++++++++++++++- 3 files changed, 278 insertions(+), 10 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index bd36b5a4175..eec3877d516 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -58,6 +58,9 @@ class HostState: self.always_child_state = None def __repr__(self): + return "HostState(%r)" % self._blocks + + def __str__(self): def _run_state_to_string(n): states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"] try: @@ -90,6 +93,20 @@ class HostState: self.always_child_state, ) + def __eq__(self, other): + if not isinstance(other, HostState): + return False + + for attr in ( + '_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task', + 'cur_role', 'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain', + 'tasks_child_state', 'rescue_child_state', 'always_child_state' + ): + if getattr(self, attr) != getattr(other, attr): + return False + + return True + def get_current_block(self): return self._blocks[self.cur_block] @@ -439,7 +456,7 @@ class PlayIterator: the different processes, and not all data structures are preserved. This method allows us to find the original task passed into the executor engine. ''' - def _search_block(block, task): + def _search_block(block): ''' helper method to check a block's task lists (block/rescue/always) for a given task uuid. If a Block is encountered in the place of a @@ -449,32 +466,32 @@ class PlayIterator: for b in (block.block, block.rescue, block.always): for t in b: if isinstance(t, Block): - res = _search_block(t, task) + res = _search_block(t) if res: return res elif t._uuid == task._uuid: return t return None - def _search_state(state, task): + def _search_state(state): for block in state._blocks: - res = _search_block(block, task) + res = _search_block(block) if res: return res for child_state in (state.tasks_child_state, state.rescue_child_state, state.always_child_state): if child_state is not None: - res = _search_state(child_state, task) + res = _search_state(child_state) if res: return res return None s = self.get_host_state(host) - res = _search_state(s, task) + res = _search_state(s) if res: return res for block in self._play.handlers: - res = _search_block(block, task) + res = _search_block(block) if res: return res diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index b9eafe44275..d3752adf2f3 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -267,6 +267,8 @@ class Base: new_me._loader = self._loader new_me._variable_manager = self._variable_manager + new_me._uuid = self._uuid + # if the ds value was set on the object, copy it to the new copy too if hasattr(self, '_ds'): new_me._ds = self._ds diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py index b2310fe242e..d093eba6769 100644 --- a/test/units/executor/test_play_iterator.py +++ b/test/units/executor/test_play_iterator.py @@ -23,8 +23,9 @@ from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError -from ansible.executor.play_iterator import PlayIterator +from ansible.executor.play_iterator import HostState, PlayIterator from ansible.playbook import Playbook +from 
ansible.playbook.task import Task from ansible.playbook.play_context import PlayContext from units.mock.loader import DictDataLoader @@ -37,6 +38,23 @@ class TestPlayIterator(unittest.TestCase): def tearDown(self): pass + def test_host_state(self): + hs = HostState(blocks=[x for x in range(0, 10)]) + hs.tasks_child_state = HostState(blocks=[0]) + hs.rescue_child_state = HostState(blocks=[1]) + hs.always_child_state = HostState(blocks=[2]) + hs.__repr__() + hs.run_state = 100 + hs.__repr__() + hs.fail_state = 15 + hs.__repr__() + + for i in range(0, 10): + hs.cur_block = i + self.assertEqual(hs.get_current_block(), i) + + new_hs = hs.copy() + def test_play_iterator(self): fake_loader = DictDataLoader({ "test_play.yml": """ @@ -48,6 +66,18 @@ class TestPlayIterator(unittest.TestCase): - debug: msg="this is a pre_task" tasks: - debug: msg="this is a regular task" + - block: + - debug: msg="this is a block task" + - block: + - debug: msg="this is a sub-block in a block" + rescue: + - debug: msg="this is a rescue task" + - block: + - debug: msg="this is a sub-block in a rescue" + always: + - debug: msg="this is an always task" + - block: + - debug: msg="this is a sub-block in an always" post_tasks: - debug: msg="this is a post_task" """, @@ -64,10 +94,12 @@ class TestPlayIterator(unittest.TestCase): hosts = [] for i in range(0, 10): - host = MagicMock() - host.get_name.return_value = 'host%02d' % i + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i hosts.append(host) + mock_var_manager._fact_cache['host00'] = dict() + inventory = MagicMock() inventory.get_hosts.return_value = hosts inventory.filter_hosts.return_value = hosts @@ -82,6 +114,18 @@ class TestPlayIterator(unittest.TestCase): all_vars=dict(), ) + # lookup up an original task + target_task = p._entries[0].tasks[0].block[0] + print("the task is: %s (%s)" % (target_task, target_task._uuid)) + task_copy = target_task.copy(exclude_block=True) + print("the copied task is: %s (%s)" % (task_copy, task_copy._uuid)) + found_task = itr.get_original_task(hosts[0], task_copy) + self.assertEqual(target_task, found_task) + + bad_task = Task() + found_task = itr.get_original_task(hosts[0], bad_task) + self.assertIsNone(found_task) + # pre task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) @@ -100,6 +144,38 @@ class TestPlayIterator(unittest.TestCase): self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') self.assertIsNone(task._role) + # block task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a block task")) + # sub-block task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in a block")) + # mark the host failed + itr.mark_host_failed(hosts[0]) + # block rescue task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a rescue task")) + # sub-block rescue task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue")) + # block always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + 
self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is an always task")) + # sub-block always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg="this is a sub-block in an always")) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) @@ -116,3 +192,176 @@ class TestPlayIterator(unittest.TestCase): (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNone(task) + # host 0 shouldn't be in the failed hosts, as the error + # was handled by a rescue block + failed_hosts = itr.get_failed_hosts() + self.assertNotIn(hosts[0], failed_hosts) + + def test_play_iterator_nested_blocks(self): + fake_loader = DictDataLoader({ + "test_play.yml": """ + - hosts: all + gather_facts: false + tasks: + - block: + - block: + - block: + - block: + - block: + - debug: msg="this is the first task" + rescue: + - block: + - block: + - block: + - block: + - debug: msg="this is the rescue task" + always: + - block: + - block: + - block: + - block: + - debug: msg="this is the rescue task" + """, + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + hosts.append(host) + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # get the first task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + # fail the host + itr.mark_host_failed(hosts[0]) + # get the resuce task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + # get the always task + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'debug') + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # implicit meta: flush_handlers + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNotNone(task) + self.assertEqual(task.action, 'meta') + # end of iteration + (host_state, task) = itr.get_next_task_for_host(hosts[0]) + self.assertIsNone(task) + + def test_play_iterator_add_tasks(self): + fake_loader = DictDataLoader({ + 'test_play.yml': """ + - hosts: all + gather_facts: no + tasks: + - debug: msg="dummy task" + """, + }) + + mock_var_manager = MagicMock() + mock_var_manager._fact_cache = dict() + mock_var_manager.get_vars.return_value = dict() + + p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager) + + hosts = [] + for i in range(0, 10): + host = MagicMock() + host.name = host.get_name.return_value = 'host%02d' % i + 
hosts.append(host) + + inventory = MagicMock() + inventory.get_hosts.return_value = hosts + inventory.filter_hosts.return_value = hosts + + play_context = PlayContext(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # test the high-level add_tasks() method + s = HostState(blocks=[0,1,2]) + itr._insert_tasks_into_state = MagicMock(return_value=s) + itr.add_tasks(hosts[0], [3,4,5]) + self.assertEqual(itr._host_states[hosts[0].name], s) + + # now actually test the lower-level method that does the work + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + play_context=play_context, + variable_manager=mock_var_manager, + all_vars=dict(), + ) + + # iterate past first task + _, task = itr.get_next_task_for_host(hosts[0]) + while(task and task.action != 'debug'): + _, task = itr.get_next_task_for_host(hosts[0]) + + if task is None: + raise Exception("iterated past end of play while looking for place to insert tasks") + + # get the current host state and copy it so we can mutate it + s = itr.get_host_state(hosts[0]) + s_copy = s.copy() + + # assert with an empty task list, or if we're in a failed state, we simply return the state as-is + res_state = itr._insert_tasks_into_state(s_copy, task_list=[]) + self.assertEqual(res_state, s_copy) + + s_copy.fail_state = itr.FAILED_TASKS + res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()]) + self.assertEqual(res_state, s_copy) + + # but if we've failed with a rescue/always block + mock_task = MagicMock() + s_copy.run_state = itr.ITERATING_RESCUE + res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task]) + self.assertEqual(res_state, s_copy) + self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue) + itr._host_states[hosts[0].name] = res_state + (next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True) + self.assertEqual(next_task, mock_task) + itr._host_states[hosts[0].name] = s + + # test a regular insertion + s_copy = s.copy() + res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()]) From 325fccfa78949ee2ff016e24ee359a110dd66892 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Mar 2016 13:16:43 -0500 Subject: [PATCH 0880/1113] added v for least verbose display function --- lib/ansible/utils/display.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 9662b04b3dd..e94c8f0397b 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -159,6 +159,9 @@ class Display: else: logger.info(msg2) + def v(self, msg, host=None): + return self.verbose(msg, host=host, caplevel=0) + def vv(self, msg, host=None): return self.verbose(msg, host=host, caplevel=1) From 098333b2ecad3f99deb95763f2db1530915620c5 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 7 Mar 2016 15:35:20 -0600 Subject: [PATCH 0881/1113] Fix redirects for get_url * fetch_url shouldn't both accept follow_redirects and support follow_redircts via module.params * Default follow_redirects for open_url should be 'urllib2' * Add redirect test for get_url --- lib/ansible/module_utils/urls.py | 8 +++----- test/integration/roles/test_get_url/tasks/main.yml | 5 +++++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 0d0965e3abe..51779dc4e13 100644 --- 
a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -676,7 +676,7 @@ def maybe_add_ssl_handler(url, validate_certs): def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, - force_basic_auth=False, follow_redirects=False): + force_basic_auth=False, follow_redirects='urllib2'): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' @@ -809,8 +809,7 @@ def url_argument_spec(): ) def fetch_url(module, url, data=None, headers=None, method=None, - use_proxy=True, force=False, last_mod_time=None, timeout=10, - follow_redirects=False): + use_proxy=True, force=False, last_mod_time=None, timeout=10): ''' Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment ''' @@ -828,8 +827,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, http_agent = module.params.get('http_agent', None) force_basic_auth = module.params.get('force_basic_auth', '') - if not follow_redirects: - follow_redirects = module.params.get('follow_redirects', False) + follow_redirects = module.params.get('follow_redirects', 'urllib2') r = None info = dict(url=url) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 9ed0549ec47..46d9ee275d5 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -145,3 +145,8 @@ - 'get_url_result["failed"]' when: "{{ not python_has_ssl_context }}" # End hacky SNI test section + +- name: Test get_url with redirect + get_url: + url: 'http://httpbin.org/redirect/6' + dest: "{{ output_dir }}/redirect.json" From 53bb889ef6bb87180fb01a8a71a0595a2eae2354 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 7 Mar 2016 15:39:31 -0600 Subject: [PATCH 0882/1113] update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0bbb7ba38da..45745424f70 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0bbb7ba38da07d2a9e562834264a2ee2fc9ceaf4 +Subproject commit 45745424f702980a8860ab5ba2d94cdfd0311695 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 39e4040685b..b51efc51bc6 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 39e4040685bf2c36fd59450ac4f9b40158179f9e +Subproject commit b51efc51bc64ef99b389acb6166d0eb46d984085 From 8aee648bc98e7e630b815df9dffc64feb9afe632 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 7 Mar 2016 17:38:47 -0600 Subject: [PATCH 0883/1113] Add as dependency of the parsing make target --- test/integration/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 7a496f65de2..05856130c6e 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -28,7 +28,7 @@ setup: rm -rf $(TEST_DIR) mkdir -p $(TEST_DIR) -parsing: +parsing: setup ansible-playbook bad_parsing.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 ansible-playbook good_parsing.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) From 
e74ab3ecdd5fb5b4fd2c97d9c2b10d370321297c Mon Sep 17 00:00:00 2001 From: Brian Coca <bcoca@ansible.com> Date: Mon, 7 Mar 2016 11:03:53 +0100 Subject: [PATCH 0884/1113] draft 1st release of ansible-console porting @dominis 's ansible-shell tool from 1.9 and integrating it into ansible added verbosity control made more resilitent to several errors added highlight color, to configurable colors more resilient on exception and interruptions prompt coloring, goes red and changes to # when using become = true and root become setting is now explicit and not a toggle --- CHANGELOG.md | 1 + bin/ansible-console | 1 + examples/ansible.cfg | 1 + lib/ansible/cli/console.py | 444 +++++++++++++++++++++++++++++++++++++ lib/ansible/constants.py | 1 + setup.py | 1 + 6 files changed, 449 insertions(+) create mode 120000 bin/ansible-console create mode 100644 lib/ansible/cli/console.py diff --git a/CHANGELOG.md b/CHANGELOG.md index f3d871bb9ca..b961501b79e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ Ansible Changes By Release ###Major Changes: * added facility for modules to send back 'diff' for display when ansible is called with --diff, updated several modules to return this info +* added ansible-console tool, a REPL shell that allows running adhoc tasks against a chosen inventory (based on https://github.com/dominis/ansible-shell ) ####New Modules: * aws: ec2_vol_facts diff --git a/bin/ansible-console b/bin/ansible-console new file mode 120000 index 00000000000..cabb1f519aa --- /dev/null +++ b/bin/ansible-console @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 48628441fb0..6c265e9bf28 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -279,6 +279,7 @@ #special_context_filesystems=nfs,vboxsf,fuse,ramfs [colors] +#higlight = white #verbose = blue #warn = bright purple #error = red diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py new file mode 100644 index 00000000000..2c96917f367 --- /dev/null +++ b/lib/ansible/cli/console.py @@ -0,0 +1,444 @@ +# (c) 2014, Nandor Sivok <dominis@haxor.hu> +# (c) 2016, Redhat Inc +# +# ansible-shell is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ansible-shell is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +######################################################## +# ansible-console is an interactive REPL shell for ansible +# with built-in tab completion for all the documented modules +# +# Available commands: +# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*) +# list - list available hosts in the current path +# forks - change fork +# become - become +# ! 
- forces shell module instead of the ansible module (!yum update -y) + +import atexit +import cmd +import getpass +import readline +import os +import sys + +from ansible import constants as C +from ansible.cli import CLI +from ansible.errors import AnsibleError, AnsibleOptionsError + +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.inventory import Inventory +from ansible.parsing.dataloader import DataLoader +from ansible.parsing.splitter import parse_kv +from ansible.playbook.play import Play +from ansible.vars import VariableManager +from ansible.utils import module_docs +from ansible.utils.color import stringc +from ansible.utils.unicode import to_unicode, to_str +from ansible.plugins import module_loader + + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + + +class ConsoleCLI(CLI, cmd.Cmd): + + modules = [] + + def __init__(self, args): + + super(ConsoleCLI, self).__init__(args) + + self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n' + + self.groups = [] + self.hosts = [] + self.pattern = None + self.variable_manager = None + self.loader = None + self.passwords = dict() + + self.modules = None + cmd.Cmd.__init__(self) + + def parse(self): + self.parser = CLI.base_parser( + usage='%prog <host-pattern> [options]', + runas_opts=True, + inventory_opts=True, + connect_opts=True, + check_opts=True, + vault_opts=True, + fork_opts=True, + module_opts=True, + ) + + # options unique to shell + self.parser.add_option('--step', dest='step', action='store_true', + help="one-step-at-a-time: confirm each task before running") + + self.parser.set_defaults(cwd='*') + self.options, self.args = self.parser.parse_args(self.args[1:]) + + display.verbosity = self.options.verbosity + self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) + + return True + + def get_names(self): + return dir(self) + + def cmdloop(self): + try: + cmd.Cmd.cmdloop(self) + except KeyboardInterrupt: + self.do_exit(self) + + def set_prompt(self): + login_user = self.options.remote_user or getpass.getuser() + self.selected = self.inventory.list_hosts(self.options.cwd) + prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks) + if self.options.become and self.options.become_user in [None, 'root']: + prompt += "# " + color = C.COLOR_ERROR + else: + prompt += "$ " + color = C.COLOR_HIGHLIGHT + self.prompt = stringc(prompt, color) + + def list_modules(self): + modules = set() + if self.options.module_path is not None: + for i in self.options.module_path.split(os.pathsep): + module_loader.add_directory(i) + + module_paths = module_loader._get_paths() + for path in module_paths: + if path is not None: + modules.update(self._find_modules_in_path(path)) + return modules + + def _find_modules_in_path(self, path): + + if os.path.isdir(path): + for module in os.listdir(path): + if module.startswith('.'): + continue + elif os.path.isdir(module): + self._find_modules_in_path(module) + elif module.startswith('__'): + continue + elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): + continue + elif module in C.IGNORE_FILES: + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + module = module.replace('_', '', 1) + + module = os.path.splitext(module)[0] # removes the extension + yield module + + def default(self, arg, forceshell=False): + """ actually runs 
modules """ + if arg.startswith("#"): + return False + + if not self.options.cwd: + display.error("No host found") + return False + + if arg.split()[0] in self.modules: + module = arg.split()[0] + module_args = ' '.join(arg.split()[1:]) + else: + module = 'shell' + module_args = arg + + if forceshell is True: + module = 'shell' + module_args = arg + + self.options.module_name = module + + result = None + try: + play_ds = dict( + name = "Ansible Shell", + hosts = self.options.cwd, + gather_facts = 'no', + #tasks = [ dict(action=dict(module=module, args=parse_kv(module_args)), async=self.options.async, poll=self.options.poll_interval) ] + tasks = [ dict(action=dict(module=module, args=parse_kv(module_args)))] + ) + play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader) + except Exception as e: + display.error(u"Unable to build command: %s" % to_unicode(e)) + return False + + try: + cb = 'minimal' #FIXME: make callbacks configurable + # now create a task queue manager to execute the play + self._tqm = None + try: + self._tqm = TaskQueueManager( + inventory=self.inventory, + variable_manager=self.variable_manager, + loader=self.loader, + options=self.options, + passwords=self.passwords, + stdout_callback=cb, + run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, + run_tree=False, + ) + + result = self._tqm.run(play) + finally: + if self._tqm: + self._tqm.cleanup() + + if result is None: + display.error("No hosts found") + return False + except KeyboardInterrupt: + display.error('User interrupted execution') + return False + except Exception as e: + display.error(to_unicode(e)) + #FIXME: add traceback in very very verbose mode + return False + + def emptyline(self): + return + + def do_shell(self, arg): + """ + You can run shell commands through the shell module. + + eg.: + shell ps uax | grep java | wc -l + shell killall python + shell halt -n + + You can use the ! to force the shell module. eg.: + !ps aux | grep java | wc -l + """ + self.default(arg, True) + + def do_forks(self, arg): + """Set the number of forks""" + if not arg: + display.display('Usage: forks <number>') + return + self.options.forks = int(arg) + self.set_prompt() + + do_serial = do_forks + + def do_verbosity(self, arg): + """Set verbosity level""" + if not arg: + display.display('Usage: verbosity <number>') + else: + display.verbosity = int(arg) + display.v('verbosity level set to %s' % arg) + + def do_cd(self, arg): + """ + Change active host/group. You can use hosts patterns as well eg.: + cd webservers + cd webservers:dbservers + cd webservers:!phoenix + cd webservers:&staging + cd webservers:dbservers:&staging:!phoenix + """ + if not arg: + self.options.cwd = '*' + elif arg == '..': + try: + self.options.cwd = self.inventory.groups_for_host(self.options.cwd)[1].name + except Exception: + self.options.cwd = '' + elif arg in '/*': + self.options.cwd = 'all' + elif self.inventory.get_hosts(arg): + self.options.cwd = arg + else: + display.display("no host matched") + + self.set_prompt() + + def do_list(self, arg): + """List the hosts in the current group""" + if arg == 'groups': + for group in self.groups: + display.display(group) + else: + for host in self.selected: + display.display(host.name) + + def do_become(self, arg): + """Toggle whether plays run with become""" + if arg: + self.options.become_user = arg + display.v("become changed to %s" % self.options.become) + self.set_prompt() + else: + display.display("Please specify become value, e.g. 
`become yes`") + + def do_remote_user(self, arg): + """Given a username, set the remote user plays are run by""" + if arg: + self.options.remote_user = arg + self.set_prompt() + else: + display.display("Please specify a remote user, e.g. `remote_user root`") + + def do_become_user(self, arg): + """Given a username, set the user that plays are run by when using become""" + if arg: + self.options.become_user = arg + else: + display.display("Please specify a user, e.g. `become_user jenkins`") + display.v("Current user is %s" % self.options.become_user) + self.set_prompt() + + def do_become_method(self, arg): + """Given a become_method, set the privilege escalation method when using become""" + if arg: + self.options.become_method = arg + display.v("become_method changed to %s" % self.options.become_method) + else: + display.display("Please specify a become_method, e.g. `become_method su`") + + def do_exit(self, args): + """Exits from the console""" + sys.stdout.write('\n') + return -1 + + do_EOF = do_exit + + def helpdefault(self, module_name): + if module_name in self.modules: + in_path = module_loader.find_plugin(module_name) + if in_path: + oc, a, _ = module_docs.get_docstring(in_path) + if oc: + display.display(oc['short_description']) + display.display('Parameters:') + for opt in oc['options'].keys(): + display.display(' ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0]) + else: + display.error('No documentation found for %s.' % module_name) + else: + display.error('%s is not a valid command, use ? to list all valid commands.' % module_name) + + def complete_cd(self, text, line, begidx, endidx): + mline = line.partition(' ')[2] + offs = len(mline) - len(text) + + if self.options.cwd in ('all','*','\\'): + completions = self.hosts + self.groups + else: + completions = [x.name for x in self.inventory.list_hosts(self.options.cwd)] + + return [to_str(s)[offs:] for s in completions if to_str(s).startswith(to_str(mline))] + + def completedefault(self, text, line, begidx, endidx): + if line.split()[0] in self.modules: + mline = line.split(' ')[-1] + offs = len(mline) - len(text) + completions = self.module_args(line.split()[0]) + + return [s[offs:] + '=' for s in completions if s.startswith(mline)] + + def module_args(self, module_name): + in_path = module_loader.find_plugin(module_name) + oc, a, _ = module_docs.get_docstring(in_path) + return oc['options'].keys() + + + def run(self): + + super(ConsoleCLI, self).run() + + sshpass = None + becomepass = None + vault_pass = None + + # hosts + if len(self.args) != 1: + self.pattern = 'all' + else: + self.pattern = self.args[0] + self.options.cwd = self.pattern + + + # dynamically add modules as commands + self.modules = self.list_modules() + for module in self.modules: + setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg)) + setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module)) + + self.normalize_become_options() + (sshpass, becomepass) = self.ask_passwords() + self.passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } + + self.loader = DataLoader() + + if self.options.vault_password_file: + # read vault_pass from a file + vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader) + self.loader.set_vault_password(vault_pass) + elif self.options.ask_vault_pass: + vault_pass = self.ask_vault_passwords()[0] + self.loader.set_vault_password(vault_pass) + + self.variable_manager = VariableManager() + 
self.inventory = Inventory(loader=self.loader, variable_manager=self.variable_manager, host_list=self.options.inventory) + self.variable_manager.set_inventory(self.inventory) + + if len(self.inventory.list_hosts(self.pattern)) == 0: + # Empty inventory + display.warning("provided hosts list is empty, only localhost is available") + + self.inventory.subset(self.options.subset) + self.groups = self.inventory.list_groups() + self.hosts = [x.name for x in self.inventory.list_hosts(self.pattern)] + + # This hack is to work around readline issues on a mac: + # http://stackoverflow.com/a/7116997/541202 + if 'libedit' in readline.__doc__: + readline.parse_and_bind("bind ^I rl_complete") + else: + readline.parse_and_bind("tab: complete") + + histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history") + try: + readline.read_history_file(histfile) + except IOError: + pass + + atexit.register(readline.write_history_file, histfile) + self.set_prompt() + self.cmdloop() + diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index d38dde6eb4e..796073c95bc 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -274,6 +274,7 @@ DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # colors +COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white') COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue') COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple') COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red') diff --git a/setup.py b/setup.py index f174f979f83..4fa964012de 100644 --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ setup(name='ansible', 'bin/ansible-pull', 'bin/ansible-doc', 'bin/ansible-galaxy', + 'bin/ansible-shell', 'bin/ansible-vault', ], data_files=[], From e24e619cf1d8d54442acdf580bf8dd0bd88a0ffe Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 7 Mar 2016 20:51:58 -0500 Subject: [PATCH 0885/1113] added stderr from vault script to error --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index cfc2e95779b..e901940f039 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -524,7 +524,7 @@ class CLI(object): raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("Vault password script %s returned non-zero (%s)." 
% (this_path, p.returncode)) + raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (this_path, p.returncode, p.stderr)) vault_pass = stdout.strip('\r\n') else: try: From 2f472fd4e961ff766fac61b2c235b84dbbd9d89e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Mar 2016 01:07:00 -0500 Subject: [PATCH 0886/1113] Adding start of ssh connection unit tests --- .../connections/test_connection_ssh.py | 267 ++++++++++++++++++ 1 file changed, 267 insertions(+) create mode 100644 test/units/plugins/connections/test_connection_ssh.py diff --git a/test/units/plugins/connections/test_connection_ssh.py b/test/units/plugins/connections/test_connection_ssh.py new file mode 100644 index 00000000000..1721a3ff757 --- /dev/null +++ b/test/units/plugins/connections/test_connection_ssh.py @@ -0,0 +1,267 @@ +# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import sys +from io import StringIO + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock, mock_open + +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ssh + +class TestConnectionBaseClass(unittest.TestCase): + + def test_plugins_connection_ssh_basic(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + + # connect just returns self, so assert that + res = conn._connect() + self.assertEqual(conn, res) + + ssh.SSHPASS_AVAILABLE = False + self.assertFalse(conn._sshpass_available()) + + ssh.SSHPASS_AVAILABLE = True + self.assertTrue(conn._sshpass_available()) + + with patch('subprocess.Popen') as p: + ssh.SSHPASS_AVAILABLE = None + p.return_value = MagicMock() + self.assertTrue(conn._sshpass_available()) + + ssh.SSHPASS_AVAILABLE = None + p.return_value = None + p.side_effect = OSError() + self.assertFalse(conn._sshpass_available()) + + conn.close() + self.assertFalse(conn._connected) + + def test_plugins_connection_ssh__build_command(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + conn._build_command('ssh') + + def test_plugins_connection_ssh_exec_command(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + + conn._build_command = MagicMock() + conn._build_command.return_value = 'ssh something something' + conn._run = MagicMock() + conn._run.return_value = (0, 'stdout', 'stderr') + + res, stdout, stderr = conn._exec_command('ssh') + res, stdout, stderr = conn._exec_command('ssh', 'this is some data') + + def test_plugins_connection_ssh__exec_command(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + + @patch('select.select') + @patch('fcntl.fcntl') + @patch('os.write') + 
@patch('os.close') + @patch('pty.openpty') + @patch('subprocess.Popen') + def test_plugins_connection_ssh__run(self, mock_Popen, mock_openpty, mock_osclose, mock_oswrite, mock_fcntl, mock_select): + pc = PlayContext() + new_stdin = StringIO() + + conn = ssh.Connection(pc, new_stdin) + conn._send_initial_data = MagicMock() + conn._examine_output = MagicMock() + conn._terminate_process = MagicMock() + conn.sshpass_pipe = [MagicMock(), MagicMock()] + + mock_popen_res = MagicMock() + mock_popen_res.poll = MagicMock() + mock_popen_res.wait = MagicMock() + mock_popen_res.stdin = MagicMock() + mock_popen_res.stdin.fileno.return_value = 1000 + mock_popen_res.stdout = MagicMock() + mock_popen_res.stdout.fileno.return_value = 1001 + mock_popen_res.stderr = MagicMock() + mock_popen_res.stderr.fileno.return_value = 1002 + mock_popen_res.return_code = 0 + mock_Popen.return_value = mock_popen_res + + def _mock_select(rlist, wlist, elist, timeout=None): + rvals = [] + if mock_popen_res.stdin in rlist: + rvals.append(mock_popen_res.stdin) + if mock_popen_res.stderr in rlist: + rvals.append(mock_popen_res.stderr) + return (rvals, [], []) + + mock_select.side_effect = _mock_select + + mock_popen_res.stdout.read.side_effect = ["some data", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run("ssh", "this is input data") + + # test with a password set to trigger the sshpass write + pc.password = '12345' + mock_popen_res.stdout.read.side_effect = ["some data", "", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run(["ssh", "is", "a", "cmd"], "this is more data") + + # test with password prompting enabled + pc.password = None + pc.prompt = True + mock_popen_res.stdout.read.side_effect = ["some data", "", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run("ssh", "this is input data") + + # test with some become settings + pc.prompt = False + pc.become = True + pc.success_key = 'BECOME-SUCCESS-abcdefg' + mock_popen_res.stdout.read.side_effect = ["some data", "", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run("ssh", "this is input data") + + # simulate no data input + mock_openpty.return_value = (98, 99) + mock_popen_res.stdout.read.side_effect = ["some data", "", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run("ssh", "") + + # simulate no data input but Popen using new pty's fails + mock_Popen.return_value = None + mock_Popen.side_effect = [OSError(), mock_popen_res] + mock_popen_res.stdout.read.side_effect = ["some data", "", ""] + mock_popen_res.stderr.read.side_effect = [""] + conn._run("ssh", "") + + def test_plugins_connection_ssh__examine_output(self): + pc = PlayContext() + new_stdin = StringIO() + + conn = ssh.Connection(pc, new_stdin) + + conn.check_password_prompt = MagicMock() + conn.check_become_success = MagicMock() + conn.check_incorrect_password = MagicMock() + conn.check_missing_password = MagicMock() + + def _check_password_prompt(line): + if 'foo' in line: + return True + return False + + def _check_become_success(line): + if 'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line: + return True + return False + + def _check_incorrect_password(line): + if 'incorrect password' in line: + return True + return False + + def _check_missing_password(line): + if 'bad password' in line: + return True + return False + + conn.check_password_prompt.side_effect = _check_password_prompt + conn.check_become_success.side_effect = _check_become_success + conn.check_incorrect_password.side_effect = _check_incorrect_password + 
conn.check_missing_password.side_effect = _check_missing_password + + # test examining output for prompt + conn._flags = dict( + become_prompt = False, + become_success = False, + become_error = False, + become_nopasswd_error = False, + ) + + pc.prompt = True + output, unprocessed = conn._examine_output('source', 'state', 'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False) + self.assertEqual(output, 'line 1\nline 2\nline 3\n') + self.assertEqual(unprocessed, 'this should be the remainder') + self.assertTrue(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for become prompt + conn._flags = dict( + become_prompt = False, + become_success = False, + become_error = False, + become_nopasswd_error = False, + ) + + pc.prompt = False + pc.success_key = 'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' + output, unprocessed = conn._examine_output('source', 'state', 'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False) + self.assertEqual(output, 'line 1\nline 2\nline 3\n') + self.assertEqual(unprocessed, '') + self.assertFalse(conn._flags['become_prompt']) + self.assertTrue(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for become failure + conn._flags = dict( + become_prompt = False, + become_success = False, + become_error = False, + become_nopasswd_error = False, + ) + + pc.prompt = False + pc.success_key = None + output, unprocessed = conn._examine_output('source', 'state', 'line 1\nline 2\nincorrect password\n', True) + self.assertEqual(output, 'line 1\nline 2\nincorrect password\n') + self.assertEqual(unprocessed, '') + self.assertFalse(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertTrue(conn._flags['become_error']) + self.assertFalse(conn._flags['become_nopasswd_error']) + + # test examining output for missing password + conn._flags = dict( + become_prompt = False, + become_success = False, + become_error = False, + become_nopasswd_error = False, + ) + + pc.prompt = False + pc.success_key = None + output, unprocessed = conn._examine_output('source', 'state', 'line 1\nbad password\n', True) + self.assertEqual(output, 'line 1\nbad password\n') + self.assertEqual(unprocessed, '') + self.assertFalse(conn._flags['become_prompt']) + self.assertFalse(conn._flags['become_success']) + self.assertFalse(conn._flags['become_error']) + self.assertTrue(conn._flags['become_nopasswd_error']) + From 2f90a4f4e25f235a3a5265a57f3d42a4262e5733 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 8 Mar 2016 07:42:42 -0600 Subject: [PATCH 0887/1113] Strip proc_1 before testing it. 
Fixes #14858 --- lib/ansible/module_utils/facts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 39dcd5455a9..1aa16c9feeb 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -581,13 +581,16 @@ class Facts(object): else: proc_1 = os.path.basename(proc_1) + if proc_1 is not None: + proc_1 = proc_1.strip() + if proc_1 == 'init' or proc_1.endswith('sh'): # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! if proc_1 is not None: - self.facts['service_mgr'] = proc_1.strip() + self.facts['service_mgr'] = proc_1 # start with the easy ones elif self.facts['distribution'] == 'MacOSX': From e903b24cb7b96fd33fdadbd1ff324700559f5734 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Tue, 8 Mar 2016 10:37:30 -0500 Subject: [PATCH 0888/1113] initial checkin of the new Community Committer Guidelines --- docsite/rst/committer_guidelines.rst | 71 ++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 docsite/rst/committer_guidelines.rst diff --git a/docsite/rst/committer_guidelines.rst b/docsite/rst/committer_guidelines.rst new file mode 100644 index 00000000000..8dc760cde00 --- /dev/null +++ b/docsite/rst/committer_guidelines.rst @@ -0,0 +1,71 @@ +Committers Guidelines (for people with commit rights to Ansible on GitHub) +`````````````````````````````````````````````````````````````````````````` + +These are the guidelines for people with commit access to Ansible. Committers are essentially acting as members of the Ansible Core team, although not necessarily as an employee of Ansible and Red Hat. Please read the guidelines before you commit. + +These guidelines apply to everyone. At the same time, this ISN’T a process document. So just use good judgement. You’ve been given commit access because we trust your judgement. + +That said, use the trust wisely. + +If you abuse the trust and break components and builds, or waste a lot of time asking people to review incomplete or untested pull requests, the trust level falls and you may be asked not to commit or you may lose access to do so. + +Features, High Level Design, and Roadmap +======================================== + +As a core team member you will be part of the team that actually develops the roadmap! So be engaged and push for what you want. However, Red Hat as a company will commit to certain features, fixes, APIs, etc. for various releases. The company and the Ansible team still has to get those done and out the door. Obligations to users, the community, and customers come first. Because of that, a feature you may want to develop yourself may not get into a release if it impacts a lot of other parts of Ansible. + +Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team will have sole responsibility for merging new features based on proposals. + +Our Workflow on GitHub +====================== + +As a committer, you may already know this, but our workflow forms a lot of our team policies. 
Please ensure you’re aware of the following workflow steps: + +* Fork the repository upon which you want to do some work +* Work on the specific branch upon which you need to commit +* Create a Pull Request and tag the people you would like to review; assign someone as the primary “owner” of your request +* Adjust code as necessary based on the Comments provided +* Ask someone on the Core Team to do a final review and merge + +Addendum to workflow for Committers: +------------------------------------ + +The Core Team is aware that this can be a difficult process at times. Sometimes, the team breaks the rules: Direct commits, merging their own PRs. This section is a set of guidelines. If you’re changing a comma in a doc, or making a very minor change, you can use your best judgement. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgement and make sure people on the team are aware of your work. + +Roles on Core +============= +* Core Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgement of these devs. +* Module Owners: Module Owners own specific modules and have indirect commit access via the current module PR mechanisms. + +General Rules +============= +Individuals with direct commit access to ansible/ansible (+core, + extras) are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than a list of what you *can* do, this is a list of what you *should not* do and, in lieu of anything else, individuals with this power are expected to use their best judgement. + +* Don’t commit directly. +* PRs that have tests will be looked at with more priority than PRs without tests. Of course not all changes require tests, but for bug fixes or functionality changes, please add tests. +* Documentation. If your PR is new feature or a change to behavior, make sure you’ve updated associated documentation or notified the right people to do so. It also helps to add the version of Core against which this documentation is compatible (to avoid confusion with stable versus devel docs, for backwards compatibility, etc.). +* Someone else should merge your pull requests. If you are a Core Committer you have leeway here for minor changes. +* After a merge clean up dead forks/branches. Don’t leave a mess hanging around. +* Consider backwards compatibility (don’t break existing playbooks). +* Consider alternate environments (yes, people have bad environments, but they are the ones that need us the most). +* Always discuss the technical merits, never address the person’s limitations (you can later go for beers and call them idiots, but not in IRC/Github/etc). +* Consider the maintenance burden, some things are cool to have, but might not be worth shoehorning in. +* Complexity breeds all kinds of problems, so keep it simple. +* Lastly, comitters that have no activity on the project (merges, triage, commits, etc) will have permissions suspended. + +Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community. + + +People +Individuals who have been asked to become part of this group have generally been contributing in significant ways to the Ansible community for some time. 
Should they agree, they are requested to add their names & github IDs to this file below via pull request, indicating that they agree to act in the ways that their fellow committers trust that they will act. + +* James Cammarata (RedHat/Ansible) +* Brian Coca (RedHat/Ansible) +* Matt Davis (RedHat/Ansible) +* Toshio Kuratomi (RedHat/Ansible) +* Jason McKerr (RedHat/Ansible) +* Robyn Bergeron (RedHat/Ansible) +* Greg DeKoenigsberg (RedHat/Ansible +* Monty Taylor +* Matt Martz + From 4af867632990bb4513944e99416a09d0bbb2a815 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Tue, 8 Mar 2016 10:42:47 -0500 Subject: [PATCH 0889/1113] fix formatting for people section --- docsite/rst/committer_guidelines.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/committer_guidelines.rst b/docsite/rst/committer_guidelines.rst index 8dc760cde00..13fd530dc68 100644 --- a/docsite/rst/committer_guidelines.rst +++ b/docsite/rst/committer_guidelines.rst @@ -57,6 +57,7 @@ Committers are expected to continue to follow the same community and contributio People +====== Individuals who have been asked to become part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names & github IDs to this file below via pull request, indicating that they agree to act in the ways that their fellow committers trust that they will act. * James Cammarata (RedHat/Ansible) From 49bb4803b544eb1a436084ac96a80c55ccc96d62 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Tue, 8 Mar 2016 10:55:38 -0500 Subject: [PATCH 0890/1113] clarify --step prompt now shows full words and indicates default fixes #7433 --- lib/ansible/plugins/strategy/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 9f4e504cf05..38752114021 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -623,10 +623,10 @@ class StrategyBase: def _take_step(self, task, host=None): ret=False + msg=u'Perform task: %s ' % task if host: - msg = u'Perform task: %s on %s (y/n/c): ' % (task, host) - else: - msg = u'Perform task: %s (y/n/c): ' % task + msg += u'on %s ' % host + msg += u'(N)o/(y)es/(c)ontinue: ' resp = display.prompt(msg) if resp.lower() in ['y','yes']: From 175351f200cd5773de1bbc9a438a934679ff60e7 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Tue, 8 Mar 2016 17:23:34 +0100 Subject: [PATCH 0891/1113] Little improvement in flow Don't get the hostname if it's not needed. 
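The host name lookup is only needed when a result actually carries a diff, so moving it inside the check skips a needless call for every other result. A minimal sketch of the pattern, using FakeHost/FakeResult stand-ins rather than the real Ansible callback objects:

class FakeHost:
    def get_name(self):
        print("resolving host name")
        return "web01"

class FakeResult:
    def __init__(self, data):
        self._result = data
        self._host = FakeHost()

def v2_on_file_diff(result):
    if 'diff' in result._result:
        # only resolve the host name once we know there is a diff to show
        host = result._host.get_name()
        print(host, result._result['diff'])

v2_on_file_diff(FakeResult({'diff': '--- before +++ after'}))
v2_on_file_diff(FakeResult({}))  # no diff: get_name() is never called
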
--- lib/ansible/plugins/callback/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 3a33ddbc4f1..6bf71243985 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -334,8 +334,8 @@ class CallbackBase: self.playbook_on_stats(stats) def v2_on_file_diff(self, result): - host = result._host.get_name() if 'diff' in result._result: + host = result._host.get_name() self.on_file_diff(host, result._result['diff']) def v2_playbook_on_include(self, included_file): From f5b6f52940bbea87daf35068a5f91e02f6b9af02 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Tue, 8 Mar 2016 17:33:29 +0100 Subject: [PATCH 0892/1113] Only show diff when the task actually induced a change This implements solution #1 in the proposal #14860. It only shows the diff if the task induced a change, which means that if the changed_when control overrides the task, not diff will be produced. See #14860 for a rationale and the use-case. --- lib/ansible/plugins/callback/default.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 072eb5f4d25..e4f583db3d7 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -150,11 +150,11 @@ class CallbackModule(CallbackBase): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - if 'diff' in res and res['diff']: + if 'diff' in res and res['diff'] and res.get('changed', False): diff = self._get_diff(res['diff']) if diff: self._display.display(diff) - elif 'diff' in result._result and result._result['diff']: + elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False): diff = self._get_diff(result._result['diff']) if diff: self._display.display(diff) From f0d3284ead6a1f02f689c51b328c912ff354adba Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Mar 2016 14:49:07 -0500 Subject: [PATCH 0893/1113] Adding more unit tests for ssh connection plugin --- .../connections/test_connection_ssh.py | 113 +++++++++++++++++- 1 file changed, 107 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/connections/test_connection_ssh.py b/test/units/plugins/connections/test_connection_ssh.py index 1721a3ff757..efdbf2b7024 100644 --- a/test/units/plugins/connections/test_connection_ssh.py +++ b/test/units/plugins/connections/test_connection_ssh.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible @@ -19,14 +21,18 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import pipes import sys from io import StringIO from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock, mock_open +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.playbook.play_context import PlayContext from ansible.plugins.connection import ssh +from ansible.utils.unicode import to_bytes, to_unicode class TestConnectionBaseClass(unittest.TestCase): @@ -64,7 +70,7 @@ class TestConnectionBaseClass(unittest.TestCase): conn = ssh.Connection(pc, new_stdin) conn._build_command('ssh') - def 
test_plugins_connection_ssh_exec_command(self): + def test_plugins_connection_ssh__exec_command(self): pc = PlayContext() new_stdin = StringIO() conn = ssh.Connection(pc, new_stdin) @@ -77,11 +83,6 @@ class TestConnectionBaseClass(unittest.TestCase): res, stdout, stderr = conn._exec_command('ssh') res, stdout, stderr = conn._exec_command('ssh', 'this is some data') - def test_plugins_connection_ssh__exec_command(self): - pc = PlayContext() - new_stdin = StringIO() - conn = ssh.Connection(pc, new_stdin) - @patch('select.select') @patch('fcntl.fcntl') @patch('os.write') @@ -265,3 +266,103 @@ class TestConnectionBaseClass(unittest.TestCase): self.assertFalse(conn._flags['become_error']) self.assertTrue(conn._flags['become_nopasswd_error']) + @patch('time.sleep') + def test_plugins_connection_ssh_exec_command(self, mock_sleep): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + conn._build_command = MagicMock() + conn._exec_command = MagicMock() + + C.ANSIBLE_SSH_RETRIES = 9 + + # test a regular, successful execution + conn._exec_command.return_value = (0, 'stdout', '') + res = conn.exec_command('ssh', 'some data') + + # test a retry, followed by success + conn._exec_command.return_value = None + conn._exec_command.side_effect = [(255, '', ''), (0, 'stdout', '')] + res = conn.exec_command('ssh', 'some data') + + # test multiple failures + conn._exec_command.side_effect = [(255, '', '')]*10 + self.assertRaises(AnsibleConnectionFailure, conn.exec_command, 'ssh', 'some data') + + # test other failure from exec_command + conn._exec_command.side_effect = [Exception('bad')]*10 + self.assertRaises(Exception, conn.exec_command, 'ssh', 'some data') + + @patch('os.path.exists') + def test_plugins_connection_ssh_put_file(self, mock_ospe): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + conn._build_command = MagicMock() + conn._run = MagicMock() + + mock_ospe.return_value = True + conn._build_command.return_value = 'some command to run' + conn._run.return_value = (0, '', '') + conn.host = "some_host" + + # test with C.DEFAULT_SCP_IF_SSH enabled + C.DEFAULT_SCP_IF_SSH = True + res = conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._run.assert_called_with('some command to run', None) + + res = conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._run.assert_called_with('some command to run', None) + + # test with C.DEFAULT_SCP_IF_SSH disabled + C.DEFAULT_SCP_IF_SSH = False + expected_in_data = b"put {0} {1}\n".format(pipes.quote('/path/to/in/file'), pipes.quote('/path/to/dest/file')) + res = conn.put_file('/path/to/in/file', '/path/to/dest/file') + conn._run.assert_called_with('some command to run', expected_in_data) + + expected_in_data = b"put {0} {1}\n".format(pipes.quote(to_bytes('/path/to/in/file/with/unicode-fö〩')), pipes.quote(to_bytes('/path/to/dest/file/with/unicode-fö〩'))) + res = conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._run.assert_called_with('some command to run', expected_in_data) + + # test that a non-zero rc raises an error + conn._run.return_value = (1, 'stdout', 'some errors') + self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file') + + # test that a not-found path raises an error + mock_ospe.return_value = False + conn._run.return_value = (0, 'stdout', '') + self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file') + + def 
test_plugins_connection_ssh_fetch_file(self): + pc = PlayContext() + new_stdin = StringIO() + conn = ssh.Connection(pc, new_stdin) + conn._build_command = MagicMock() + conn._run = MagicMock() + + conn._build_command.return_value = 'some command to run' + conn._run.return_value = (0, '', '') + conn.host = "some_host" + + # test with C.DEFAULT_SCP_IF_SSH enabled + C.DEFAULT_SCP_IF_SSH = True + res = conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + conn._run.assert_called_with('some command to run', None) + + res = conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._run.assert_called_with('some command to run', None) + + # test with C.DEFAULT_SCP_IF_SSH disabled + C.DEFAULT_SCP_IF_SSH = False + expected_in_data = b"get {0} {1}\n".format(pipes.quote('/path/to/in/file'), pipes.quote('/path/to/dest/file')) + res = conn.fetch_file('/path/to/in/file', '/path/to/dest/file') + conn._run.assert_called_with('some command to run', expected_in_data) + + expected_in_data = b"get {0} {1}\n".format(pipes.quote(to_bytes('/path/to/in/file/with/unicode-fö〩')), pipes.quote(to_bytes('/path/to/dest/file/with/unicode-fö〩'))) + res = conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') + conn._run.assert_called_with('some command to run', expected_in_data) + + # test that a non-zero rc raises an error + conn._run.return_value = (1, 'stdout', 'some errors') + self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file') + From 1a5ee115f0b0677de38dc60091530859a76dee13 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Mar 2016 14:49:18 -0500 Subject: [PATCH 0894/1113] Fixing minor logic error in error detection/handling in ssh connection plugin If max retries were reached, no AnsibleConnectionFailure was raised, which means potentially in some cases an unreachable error might not be returned --- lib/ansible/plugins/connection/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 328ace55374..c24a5623ed3 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -591,7 +591,7 @@ class Connection(ConnectionBase): # 0 = success # 1-254 = remote command return code # 255 = failure from the ssh command itself - if return_tuple[0] != 255 or attempt == (remaining_tries - 1): + if return_tuple[0] != 255: break else: raise AnsibleConnectionFailure("Failed to connect to the host via ssh.") From b01caa371fc36a6ca5cb41f7dfe194e2c5f9f78c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 8 Mar 2016 15:34:37 -0500 Subject: [PATCH 0895/1113] Don't use notices for travis irc messages --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index df224937cfe..f90bf997b5b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,6 +37,5 @@ notifications: - "chat.freenode.net#ansible-notices" on_success: change on_failure: always - use_notice: true skip_join: true nick: ansibletravis From 83e748e31520ffe5e29bf8a04efba96999f9cd60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Fievet?= <_@sebastien-fievet.fr> Date: Wed, 9 Mar 2016 11:19:39 +0700 Subject: [PATCH 0896/1113] Fix filtering by project in Cloudstask inventory --- contrib/inventory/cloudstack.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/cloudstack.py 
b/contrib/inventory/cloudstack.py index 5911f662c94..e818aea8cff 100755 --- a/contrib/inventory/cloudstack.py +++ b/contrib/inventory/cloudstack.py @@ -109,11 +109,11 @@ class CloudStackInventory(object): project_id = self.get_project_id(options.project) if options.host: - data = self.get_host(options.host) + data = self.get_host(options.host, project_id) print(json.dumps(data, indent=2)) elif options.list: - data = self.get_list() + data = self.get_list(project_id) print(json.dumps(data, indent=2)) else: print("usage: --list | --host <hostname> [--project <project>]", From 33b550fc76bad8fe51f78cc7280586df4d34aea9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Mar 2016 00:53:33 -0500 Subject: [PATCH 0897/1113] paging for amazon modules this adds a decorator for use in amazon modules so retrieval functions can page using the 'marker' that most boto functions return --- lib/ansible/module_utils/ec2.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 4fa7631f008..52b085eb786 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -26,6 +26,7 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os +from time import sleep try: import boto3 @@ -237,3 +238,27 @@ def ec2_connect(module): module.fail_json(msg="Either region or ec2_url must be specified") return ec2 + +def paging(pause=0): + """ Adds paging to boto retrieval functions that support 'marker' """ + def wrapper(f): + def page(*args, **kwargs): + results = [] + marker = None + while True: + try: + new = f(*args, marker=marker, **kwargs) + marker = new.next_marker + results.extend(new) + if not marker: + break + elif pause: + sleep(pause) + except TypeError: + # Older version of boto do not allow for marker param, just run normally + results = f(*args, **kwargs) + break + return results + return page + return wrapper + From 33f93f924135edfbb74e2d52c65372dd621ca377 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Tue, 8 Mar 2016 22:15:18 -0800 Subject: [PATCH 0898/1113] Fix misplaced paren. 
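The misplaced parenthesis applies the string-formatting operator to the exception object instead of the message, so the call raises a TypeError rather than the intended AnsibleError. A minimal standalone sketch of the difference, using an illustrative DemoError and executable name in place of the actual Ansible code:

class DemoError(Exception):
    pass

executable = "jexec"

try:
    # misplaced paren: '%' is applied to the exception instance, so this
    # raises TypeError instead of DemoError
    raise DemoError("%s command not found in PATH") % executable
except TypeError as e:
    print("TypeError:", e)

try:
    # fixed form: interpolate the message first, then raise the exception
    raise DemoError("%s command not found in PATH" % executable)
except DemoError as e:
    print("DemoError:", e)
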
--- lib/ansible/plugins/connection/jail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index 8f88b6ad28f..2196b1bf8f5 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -71,7 +71,7 @@ class Connection(ConnectionBase): def _search_executable(executable): cmd = distutils.spawn.find_executable(executable) if not cmd: - raise AnsibleError("%s command not found in PATH") % executable + raise AnsibleError("%s command not found in PATH" % executable) return cmd def list_jails(self): From 7067bb32b8eb0a6dc53c0278b8452594744a6f13 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 6 Mar 2016 10:47:15 -0500 Subject: [PATCH 0899/1113] make all conditionals lists this brings them to equivalence with when: fixes #13905 --- lib/ansible/executor/task_executor.py | 18 +++++++++++------- lib/ansible/playbook/task.py | 6 +++--- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index a0881cc222b..1ed03ed2b2b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -452,19 +452,23 @@ class TaskExecutor: # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): - if self._task.changed_when is not None: + if self._task.changed_when: cond = Conditional(loader=self._loader) - cond.when = [ self._task.changed_when ] + cond.when = self._task.changed_when result['changed'] = cond.evaluate_conditional(templar, vars_copy) + else: + result['changed'] = False def _evaluate_failed_when_result(result): - if self._task.failed_when is not None: + if self._task.failed_when: cond = Conditional(loader=self._loader) - cond.when = [ self._task.failed_when ] + cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result - return failed_when_result - return False + else: + failed_when_result = False + result['failed'] = False + return failed_when_result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) @@ -482,7 +486,7 @@ class TaskExecutor: if attempt < retries - 1: cond = Conditional(loader=self._loader) - cond.when = [ self._task.until ] + cond.when = self._task.until if cond.evaluate_conditional(templar, vars_copy): break else: diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index c97e81e9648..8ee440386b6 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -70,11 +70,11 @@ class Task(Base, Conditional, Taggable, Become): _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int', default=0) - _changed_when = FieldAttribute(isa='string') + _changed_when = FieldAttribute(isa='list', default=[]) _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') _delegate_facts = FieldAttribute(isa='bool', default=False) - _failed_when = FieldAttribute(isa='string') + _failed_when = FieldAttribute(isa='list', default=[]) _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) @@ -83,7 +83,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', 
default=3) - _until = FieldAttribute(isa='string') + _until = FieldAttribute(isa='list', default=[]) def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' From 82c150b72f88c3e1ec5080fba73de28b78931334 Mon Sep 17 00:00:00 2001 From: Monty Taylor <mordred@inaugust.com> Date: Wed, 9 Mar 2016 11:54:30 -0600 Subject: [PATCH 0900/1113] Detect empty cache files in openstack inventory There are cases where it makes more sense to zero out a cache file as a form of invalidation instead of removing it. Detect those approrpriately --- contrib/inventory/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 1c7207a9e17..cc1f6dbed67 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -159,7 +159,7 @@ def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' if refresh: return True - if os.path.isfile(cache_file): + if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: mod_time = os.path.getmtime(cache_file) current_time = time.time() if (mod_time + cache_expiration_time) > current_time: From 9d61a6cba841913c4369dc02870efd5cb35c1981 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 9 Mar 2016 13:29:22 -0500 Subject: [PATCH 0901/1113] Fixing PlayIterator bugs * Unit tests exposed a problem where nested blocks did not correctly hit rescue/always portions of parent blocks * Cleaned up logic in PlayIterator * Unfortunately fixing the above exposed a potential problem in the block integration tests, where a failure in an "always" section may always lead to a failed state and the termination of execution beyond that point, so certain parts of the block integration test were disabled. --- lib/ansible/executor/play_iterator.py | 155 +++++++++++++--------- test/integration/test_blocks/main.yml | 26 ++-- test/units/executor/test_play_iterator.py | 21 +-- 3 files changed, 120 insertions(+), 82 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index eec3877d516..83abb40bbc1 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -258,6 +258,10 @@ class PlayIterator: return (state, None) if state.run_state == self.ITERATING_SETUP: + # First, we check to see if we were pending setup. If not, this is + # the first trip through ITERATING_SETUP, so we set the pending_setup + # flag and try to determine if we do in fact want to gather facts for + # the specified host. if not state.pending_setup: state.pending_setup = True @@ -272,13 +276,19 @@ class PlayIterator: if (gathering == 'implicit' and implied) or \ (gathering == 'explicit' and boolean(self._play.gather_facts)) or \ (gathering == 'smart' and implied and not host._gathered_facts): - # mark the host as having gathered facts + # The setup block is always self._blocks[0], as we inject it + # during the play compilation in __init__ above. 
setup_block = self._blocks[0] if setup_block.has_tasks() and len(setup_block.block) > 0: task = setup_block.block[0] if not peek: + # mark the host as having gathered facts, because we're + # returning the setup task to be executed host.set_gathered_facts(True) else: + # This is the second trip through ITERATING_SETUP, so we clear + # the flag and move onto the next block in the list while setting + # the run state to ITERATING_TASKS state.pending_setup = False state.cur_block += 1 @@ -293,86 +303,109 @@ class PlayIterator: if state.pending_setup: state.pending_setup = False - if self._check_failed_state(state): - state.run_state = self.ITERATING_RESCUE - elif state.cur_regular_task >= len(block.block): - state.run_state = self.ITERATING_ALWAYS + # First, we check for a child task state that is not failed, and if we + # have one recurse into it for the next task. If we're done with the child + # state, we clear it and drop back to geting the next task from the list. + if state.tasks_child_state: + if state.tasks_child_state.fail_state != self.FAILED_NONE: + # failed child state, so clear it and move into the rescue portion + state.tasks_child_state = None + state.fail_state |= self.FAILED_TASKS + state.run_state = self.ITERATING_RESCUE + else: + # get the next task recursively + (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek) + if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE: + # we're done with the child state, so clear it and continue + # back to the top of the loop to get the next task + state.tasks_child_state = None + continue else: - task = block.block[state.cur_regular_task] - # if the current task is actually a child block, we dive into it - if isinstance(task, Block) or state.tasks_child_state is not None: - if state.tasks_child_state is None: + # First here, we check to see if we've failed anywhere down the chain + # of states we have, and if so we move onto the rescue portion. Otherwise, + # we check to see if we've moved past the end of the list of tasks. If so, + # we move into the always portion of the block, otherwise we get the next + # task from the list. 
+ if self._check_failed_state(state): + state.run_state = self.ITERATING_RESCUE + elif state.cur_regular_task >= len(block.block): + state.run_state = self.ITERATING_ALWAYS + else: + task = block.block[state.cur_regular_task] + # if the current task is actually a child block, create a child + # state for us to recurse into on the next pass + if isinstance(task, Block) or state.tasks_child_state is not None: state.tasks_child_state = HostState(blocks=[task]) state.tasks_child_state.run_state = self.ITERATING_TASKS state.tasks_child_state.cur_role = state.cur_role - (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek) - if task is None: - # check to see if the child state was failed, if so we need to - # fail here too so we don't continue iterating tasks - if state.tasks_child_state.fail_state != self.FAILED_NONE: - state.fail_state |= self.FAILED_TASKS - state.tasks_child_state = None - state.cur_regular_task += 1 - continue - else: + # since we've created the child state, clear the task + # so we can pick up the child state on the next pass + task = None state.cur_regular_task += 1 elif state.run_state == self.ITERATING_RESCUE: - if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: - state.run_state = self.ITERATING_ALWAYS - elif state.cur_rescue_task >= len(block.rescue): - if len(block.rescue) > 0: - state.fail_state = self.FAILED_NONE - state.run_state = self.ITERATING_ALWAYS + # The process here is identical to ITERATING_TASKS, except instead + # we move into the always portion of the block. + if state.rescue_child_state: + if state.rescue_child_state.fail_state != self.FAILED_NONE: + state.rescue_child_state = None + state.fail_state |= self.FAILED_RESCUE + state.run_state = self.ITERATING_ALWAYS + else: + (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek) + if task is None: + state.rescue_child_state = None + continue else: - task = block.rescue[state.cur_rescue_task] - if isinstance(task, Block) or state.rescue_child_state is not None: - if state.rescue_child_state is None: + if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: + state.run_state = self.ITERATING_ALWAYS + elif state.cur_rescue_task >= len(block.rescue): + if len(block.rescue) > 0: + state.fail_state = self.FAILED_NONE + state.run_state = self.ITERATING_ALWAYS + else: + task = block.rescue[state.cur_rescue_task] + if isinstance(task, Block) or state.rescue_child_state is not None: state.rescue_child_state = HostState(blocks=[task]) state.rescue_child_state.run_state = self.ITERATING_TASKS state.rescue_child_state.cur_role = state.cur_role - (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek) - if task is None: - # check to see if the child state was failed, if so we need to - # fail here too so we don't continue iterating rescue - if state.rescue_child_state.fail_state != self.FAILED_NONE: - state.fail_state |= self.FAILED_RESCUE - state.rescue_child_state = None - state.cur_rescue_task += 1 - continue - else: + task = None state.cur_rescue_task += 1 elif state.run_state == self.ITERATING_ALWAYS: - if state.cur_always_task >= len(block.always): - if state.fail_state != self.FAILED_NONE: + # And again, the process here is identical to ITERATING_TASKS, except + # instead we either move onto the next block in the list, or we set the + # run state to ITERATING_COMPLETE in the event of any errors, or when we + # have hit 
the end of the list of blocks. + if state.always_child_state: + if state.always_child_state.fail_state != self.FAILED_NONE: + state.always_child_state = None + state.fail_state |= self.FAILED_ALWAYS state.run_state = self.ITERATING_COMPLETE else: - state.cur_block += 1 - state.cur_regular_task = 0 - state.cur_rescue_task = 0 - state.cur_always_task = 0 - state.run_state = self.ITERATING_TASKS - state.tasks_child_state = None - state.rescue_child_state = None - state.always_child_state = None + (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek) + if task is None: + state.always_child_state = None else: - task = block.always[state.cur_always_task] - if isinstance(task, Block) or state.always_child_state is not None: - if state.always_child_state is None: + if state.cur_always_task >= len(block.always): + if state.fail_state != self.FAILED_NONE: + state.run_state = self.ITERATING_COMPLETE + else: + state.cur_block += 1 + state.cur_regular_task = 0 + state.cur_rescue_task = 0 + state.cur_always_task = 0 + state.run_state = self.ITERATING_TASKS + state.tasks_child_state = None + state.rescue_child_state = None + state.always_child_state = None + else: + task = block.always[state.cur_always_task] + if isinstance(task, Block) or state.always_child_state is not None: state.always_child_state = HostState(blocks=[task]) state.always_child_state.run_state = self.ITERATING_TASKS state.always_child_state.cur_role = state.cur_role - (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek) - if task is None: - # check to see if the child state was failed, if so we need to - # fail here too so we don't continue iterating always - if state.always_child_state.fail_state != self.FAILED_NONE: - state.fail_state |= self.FAILED_ALWAYS - state.always_child_state = None - state.cur_always_task += 1 - continue - else: + task = None state.cur_always_task += 1 elif state.run_state == self.ITERATING_COMPLETE: diff --git a/test/integration/test_blocks/main.yml b/test/integration/test_blocks/main.yml index cb6fc66600e..d318145ac9a 100644 --- a/test/integration/test_blocks/main.yml +++ b/test/integration/test_blocks/main.yml @@ -33,17 +33,17 @@ - name: set block always run flag set_fact: block_always_run: true - - block: - - meta: noop - always: - - name: set nested block always run flag - set_fact: - nested_block_always_run: true - - name: fail in always - fail: - - name: tasks flag should not be set after failure in always - set_fact: - always_run_after_failure: true + #- block: + # - meta: noop + # always: + # - name: set nested block always run flag + # set_fact: + # nested_block_always_run: true + # - name: fail in always + # fail: + # - name: tasks flag should not be set after failure in always + # set_fact: + # always_run_after_failure: true - meta: clear_host_errors post_tasks: @@ -52,7 +52,7 @@ - block_tasks_run - block_rescue_run - block_always_run - - nested_block_always_run + #- nested_block_always_run - not tasks_run_after_failure - not rescue_run_after_failure - not always_run_after_failure @@ -84,7 +84,7 @@ include: fail.yml args: msg: "failed from rescue" - - name: tasks flag should not be set after failure in rescue + - name: flag should not be set after failure in rescue set_fact: rescue_run_after_failure: true always: diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py index d093eba6769..d8e0d97e021 100644 --- 
a/test/units/executor/test_play_iterator.py +++ b/test/units/executor/test_play_iterator.py @@ -116,9 +116,7 @@ class TestPlayIterator(unittest.TestCase): # lookup up an original task target_task = p._entries[0].tasks[0].block[0] - print("the task is: %s (%s)" % (target_task, target_task._uuid)) task_copy = target_task.copy(exclude_block=True) - print("the copied task is: %s (%s)" % (task_copy, task_copy._uuid)) found_task = itr.get_original_task(hosts[0], task_copy) self.assertEqual(target_task, found_task) @@ -209,18 +207,19 @@ class TestPlayIterator(unittest.TestCase): - block: - block: - debug: msg="this is the first task" - rescue: - - block: + - ping: + rescue: - block: - block: - block: - - debug: msg="this is the rescue task" - always: - - block: + - block: + - debug: msg="this is the rescue task" + always: - block: - block: - block: - - debug: msg="this is the rescue task" + - block: + - debug: msg="this is the always task" """, }) @@ -254,28 +253,34 @@ class TestPlayIterator(unittest.TestCase): (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # get the first task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the first task')) # fail the host itr.mark_host_failed(hosts[0]) # get the resuce task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the rescue task')) # get the always task (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'debug') + self.assertEqual(task.args, dict(msg='this is the always task')) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # implicit meta: flush_handlers (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNotNone(task) self.assertEqual(task.action, 'meta') + self.assertEqual(task.args, dict(_raw_params='flush_handlers')) # end of iteration (host_state, task) = itr.get_next_task_for_host(hosts[0]) self.assertIsNone(task) From 369b3b317ef9f47f05d3d93463ecd932bd0cf36f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Mar 2016 13:57:19 -0500 Subject: [PATCH 0902/1113] renamed shell to console in last spot --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4fa964012de..b9678e2ab9b 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ setup(name='ansible', 'bin/ansible-pull', 'bin/ansible-doc', 'bin/ansible-galaxy', - 'bin/ansible-shell', + 'bin/ansible-console', 'bin/ansible-vault', ], data_files=[], From eb463fab00c26274980e0469dbb208f34daad7e8 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 9 Mar 2016 13:53:52 -0500 Subject: [PATCH 0903/1113] Also changing ansible-shell -> ansible-console in cli code comments --- lib/ansible/cli/console.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py index 2c96917f367..38ec0379341 100644 --- a/lib/ansible/cli/console.py +++ b/lib/ansible/cli/console.py @@ -1,12 +1,12 @@ # (c) 2014, Nandor Sivok 
<dominis@haxor.hu> # (c) 2016, Redhat Inc # -# ansible-shell is free software: you can redistribute it and/or modify +# ansible-console is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# ansible-shell is distributed in the hope that it will be useful, +# ansible-console is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. From 2b8a0cdddc54a545be49898ed7b36f978171b719 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Mar 2016 11:14:02 -0800 Subject: [PATCH 0904/1113] Fix ssh connection unittests for python3.4 --- test/units/plugins/connections/test_connection_ssh.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/units/plugins/connections/test_connection_ssh.py b/test/units/plugins/connections/test_connection_ssh.py index efdbf2b7024..039be410527 100644 --- a/test/units/plugins/connections/test_connection_ssh.py +++ b/test/units/plugins/connections/test_connection_ssh.py @@ -316,11 +316,11 @@ class TestConnectionBaseClass(unittest.TestCase): # test with C.DEFAULT_SCP_IF_SSH disabled C.DEFAULT_SCP_IF_SSH = False - expected_in_data = b"put {0} {1}\n".format(pipes.quote('/path/to/in/file'), pipes.quote('/path/to/dest/file')) + expected_in_data = b' '.join((b'put', to_bytes(pipes.quote('/path/to/in/file')), to_bytes(pipes.quote('/path/to/dest/file')))) + b'\n' res = conn.put_file('/path/to/in/file', '/path/to/dest/file') conn._run.assert_called_with('some command to run', expected_in_data) - expected_in_data = b"put {0} {1}\n".format(pipes.quote(to_bytes('/path/to/in/file/with/unicode-fö〩')), pipes.quote(to_bytes('/path/to/dest/file/with/unicode-fö〩'))) + expected_in_data = b' '.join((b'put', to_bytes(pipes.quote('/path/to/in/file/with/unicode-fö〩')), to_bytes(pipes.quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n' res = conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') conn._run.assert_called_with('some command to run', expected_in_data) @@ -354,11 +354,11 @@ class TestConnectionBaseClass(unittest.TestCase): # test with C.DEFAULT_SCP_IF_SSH disabled C.DEFAULT_SCP_IF_SSH = False - expected_in_data = b"get {0} {1}\n".format(pipes.quote('/path/to/in/file'), pipes.quote('/path/to/dest/file')) + expected_in_data = b' '.join((b'get', to_bytes(pipes.quote('/path/to/in/file')), to_bytes(pipes.quote('/path/to/dest/file')))) + b'\n' res = conn.fetch_file('/path/to/in/file', '/path/to/dest/file') conn._run.assert_called_with('some command to run', expected_in_data) - expected_in_data = b"get {0} {1}\n".format(pipes.quote(to_bytes('/path/to/in/file/with/unicode-fö〩')), pipes.quote(to_bytes('/path/to/dest/file/with/unicode-fö〩'))) + expected_in_data = b' '.join((b'get', to_bytes(pipes.quote('/path/to/in/file/with/unicode-fö〩')), to_bytes(pipes.quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n' res = conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩') conn._run.assert_called_with('some command to run', expected_in_data) From c0e2dd16933f59c49e9fd46bb5512a41f33b08bf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Mar 2016 11:17:10 -0800 Subject: [PATCH 
0905/1113] Fix ssh connection plugin to work with python3 --- lib/ansible/plugins/connection/ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index c24a5623ed3..56acd57afb2 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -323,7 +323,7 @@ class Connection(ConnectionBase): if isinstance(cmd, (text_type, binary_type)): cmd = to_bytes(cmd) else: - cmd = map(to_bytes, cmd) + cmd = list(map(to_bytes, cmd)) if not in_data: try: @@ -585,7 +585,7 @@ class Connection(ConnectionBase): remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1 cmd_summary = "%s..." % args[0] - for attempt in xrange(remaining_tries): + for attempt in range(remaining_tries): try: return_tuple = self._exec_command(*args, **kwargs) # 0 = success From 0628951ac6f80443a523a6de568576ebaea452ff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Mar 2016 11:27:19 -0800 Subject: [PATCH 0906/1113] Handle shlex incompatibility between python2.6 and python3 --- lib/ansible/plugins/connection/__init__.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 75e306054be..8528a2e75d9 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -128,7 +128,16 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to the argument list. The list will not contain any empty elements. """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + try: + # Python 2.6.x shlex doesn't handle unicode type so we have to + # convert args to byte string for that case. 
More efficient to + # try without conversion first but python2.6 doesn't throw an + # exception, it merely mangles the output: + # >>> shlex.split(u't e') + # ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00'] + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + except AttributeError: + return [to_unicode(x.strip()) for x in shlex.split(argstring) if x.strip()] @abstractproperty def transport(self): From 331f62f769aa0147a3f43ddd896c6561abde720a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 9 Mar 2016 11:28:49 -0800 Subject: [PATCH 0907/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 45745424f70..c1398d98ed9 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 45745424f702980a8860ab5ba2d94cdfd0311695 +Subproject commit c1398d98ed95b53625ecbb514d532b53378d2075 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index b51efc51bc6..33a557cc59d 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit b51efc51bc64ef99b389acb6166d0eb46d984085 +Subproject commit 33a557cc59dfce486634f4a0c0f0db4431afb0f7 From 7f233c4c5e65e874f0a99464117b63ded1beff99 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Wed, 9 Mar 2016 15:52:23 -0500 Subject: [PATCH 0908/1113] Updating submodule refs --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c1398d98ed9..c86a0ef84a4 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c1398d98ed95b53625ecbb514d532b53378d2075 +Subproject commit c86a0ef84a46133814bf6f240237640139e09fad From 9318727021456a4b5d306bb051e9ce34b509177c Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Mar 2016 15:18:37 -0500 Subject: [PATCH 0909/1113] corrected changed_when handling --- lib/ansible/executor/task_executor.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 1ed03ed2b2b..2571b325885 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -452,12 +452,10 @@ class TaskExecutor: # helper methods for use below in evaluating changed/failed_when def _evaluate_changed_when_result(result): - if self._task.changed_when: + if self._task.changed_when is not None and self._task.changed_when: cond = Conditional(loader=self._loader) cond.when = self._task.changed_when result['changed'] = cond.evaluate_conditional(templar, vars_copy) - else: - result['changed'] = False def _evaluate_failed_when_result(result): if self._task.failed_when: From 18e9bc9753d7cf3c4768f168666733190a26c6b5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 27 Nov 2015 09:38:37 -0800 Subject: [PATCH 0910/1113] common retry and rate limiting decorators for apis allows modules to wrap their request functions in common retry and rate limiting decorators or functions --- lib/ansible/module_utils/api.py | 103 ++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 lib/ansible/module_utils/api.py diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py new file mode 
100644 index 00000000000..79d38a2190b --- /dev/null +++ b/lib/ansible/module_utils/api.py @@ -0,0 +1,103 @@ +# +# (c) 2015 Brian Ccoa, <bcoca@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# +""" +This module adds shared support for generic api modules + +In order to use this module, include it as part of a custom +module as shown below. + +** Note: The order of the import statements does matter. ** + +from ansible.module_utils.basic import * +from ansible.module_utils.api import * + +The 'api' module provides the following common argument specs: + + * rate limit spec + - rate: number of requests per time unit (int) + - rate_limit: time window in which the limit is applied in seconds + + * retry spec + - retries: number of attempts + - retry_pause: delay between attempts in seconds + +""" +import time + +def rate_limit_argument_spec(spec=None): + """Creates an argument spec for working with rate limiting""" + arg_spec = (dict( + rate=dict(type='int'), + rate_limit=dict(type='int'), + )) + if spec: + arg_spec.update(spec) + return arg_spec + +def retry_argument_spec(spec=None): + """Creates an argument spec for working with retrying""" + arg_spec = (dict( + retries=dict(type='int'), + retry_pause=dict(type='float', default=1), + )) + if spec: + arg_spec.update(spec) + return arg_spec + +def rate_limit(rate=None, rate_limit=None): + """rate limiting decorator""" + minrate = None + if rate is not None and rate_limit is not None: + minrate = float(rate_limit) / float(rate) + def wrapper(f): + last = [0.0] + def ratelimited(*args,**kwargs): + if minrate is not None: + elapsed = time.clock() - last[0] + left = minrate - elapsed + if left > 0: + time.sleep(left) + last[0] = time.clock() + ret = f(*args,**kwargs) + return ret + return ratelimited + return wrapper + +def retry(retries=None, retry_pause=1): + """Retry decorator""" + def wrapper(f): + retry_count = 0 + def retried(*args,**kwargs): + if retries is not None: + ret = None + while True: + retry_count += 1 + if retry_count >= retries: + raise Exception("Retry limit exceeded: %d" % retries) + try: + ret = f(*args,**kwargs) + except: + pass + if ret: + break + time.sleep(retry_pause) + return ret + return retried + return wrapper + From 6c835b2600f33c272a411e86a179c3ee878a0b2c Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Wed, 9 Mar 2016 17:36:39 -0800 Subject: [PATCH 0911/1113] ensure assert and fail work before we test anything else --- test/integration/Makefile | 7 +++++-- test/integration/test_test_infra.yml | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 test/integration/test_test_infra.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index d55f1fa830f..0f6e1ddb38c 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -23,9 +23,12 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python 
consul_running.py) EUID := $(shell id -u -r) -all: setup parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection +all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection -setup: +test_test_infra: + [ "$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) | fgrep works | xargs)" = "msg: fail works (True) msg: assert works (True)" ] + +setup: test_test_infra rm -rf $(TEST_DIR) mkdir -p $(TEST_DIR) diff --git a/test/integration/test_test_infra.yml b/test/integration/test_test_infra.yml new file mode 100644 index 00000000000..b05d0e2e3e7 --- /dev/null +++ b/test/integration/test_test_infra.yml @@ -0,0 +1,21 @@ +- hosts: testhost + gather_facts: no + tasks: + - fail: + ignore_errors: yes + register: fail_out + + - debug: + msg: fail works ({{ fail_out.failed }}) + + - assert: + that: false + ignore_errors: yes + register: assert_out + + - debug: + msg: assert works ({{ assert_out.failed }}) + + - fail: + msg: fail actually failed + From dbff3a6bbc3732cb7f2f4e7ed1e3f4e91661cd5d Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Wed, 9 Mar 2016 17:48:59 -0800 Subject: [PATCH 0912/1113] Fix blind override in failed_when caused all task failures to be success --- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2571b325885..beceef18481 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -465,7 +465,7 @@ class TaskExecutor: result['failed_when_result'] = result['failed'] = failed_when_result else: failed_when_result = False - result['failed'] = False + result['failed'] = result.get('failed', False) return failed_when_result if 'ansible_facts' in result: From d7d04ad2cd724f46b915eb0d7a64e0107f15518a Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 10 Mar 2016 03:18:09 +0100 Subject: [PATCH 0913/1113] Fix misspelling and some cosmetic change --- lib/ansible/playbook/become.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index 1e579751d46..a2d1109cfcc 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -31,7 +31,7 @@ except ImportError: class Become: - # Privlege escalation + # Privilege escalation _become = FieldAttribute(isa='bool') _become_method = FieldAttribute(isa='string') _become_user = FieldAttribute(isa='string') @@ -60,7 +60,7 @@ class Become: This is called from the Base object's preprocess_data() method which in turn is called pretty much anytime any sort of playbook object - (plays, tasks, blocks, etc) are created. + (plays, tasks, blocks, etc) is created. 
""" self._detect_privilege_escalation_conflict(ds) @@ -90,7 +90,6 @@ class Become: display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") - return ds def set_become_defaults(self, become, become_method, become_user): From a5e11b1325cb7c42f501ac8a461d6e977abdd645 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 10 Mar 2016 03:24:04 +0100 Subject: [PATCH 0914/1113] Another misspelling ...while reading the source --- lib/ansible/playbook/conditional.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index c8c6a9359ec..5615a252b8c 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -56,7 +56,7 @@ class Conditional: False if any of them evaluate as such. ''' - # since this is a mixin, it may not have an underlying datastructure + # since this is a mix-in, it may not have an underlying datastructure # associated with it, so we pull it out now in case we need it for # error reporting below ds = None @@ -86,7 +86,7 @@ class Conditional: if conditional in all_vars and '-' not in text_type(all_vars[conditional]): conditional = all_vars[conditional] - # make sure the templar is using the variables specifed to this method + # make sure the templar is using the variables specified with this method templar.set_available_variables(variables=all_vars) try: From 140df20504617dcf5f7e4951443b9110420d4792 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 00:16:24 -0500 Subject: [PATCH 0915/1113] corrected failed_when test --- test/integration/roles/test_failed_when/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml index 4a5617e1423..7a0dce3b2ec 100644 --- a/test/integration/roles/test_failed_when/tasks/main.yml +++ b/test/integration/roles/test_failed_when/tasks/main.yml @@ -23,7 +23,7 @@ - assert: that: - - "'failed' not in result" + - "'failed' in result and result.failed" - name: command rc 0 failed_when_result False shell: exit 0 From 7e6343213b9ba78db828733faf9d9c0cb8b96ece Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 00:24:27 -0500 Subject: [PATCH 0916/1113] complete correction of test, key exists but is false --- test/integration/roles/test_failed_when/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml index 7a0dce3b2ec..37c495e0d4e 100644 --- a/test/integration/roles/test_failed_when/tasks/main.yml +++ b/test/integration/roles/test_failed_when/tasks/main.yml @@ -23,7 +23,7 @@ - assert: that: - - "'failed' in result and result.failed" + - "'failed' in result and not result.failed" - name: command rc 0 failed_when_result False shell: exit 0 From 923548f6d5877c8803edddd70136f070ccb77df4 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 00:29:32 -0500 Subject: [PATCH 0917/1113] removed test_infra from setup as it fails when using --tags --- test/integration/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 0f6e1ddb38c..07b2d3c78cb 100644 --- a/test/integration/Makefile +++ 
b/test/integration/Makefile @@ -28,7 +28,7 @@ all: setup test_test_infra parsing test_var_precedence unicode test_templating_s test_test_infra: [ "$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) | fgrep works | xargs)" = "msg: fail works (True) msg: assert works (True)" ] -setup: test_test_infra +setup: rm -rf $(TEST_DIR) mkdir -p $(TEST_DIR) @@ -39,7 +39,7 @@ parsing: setup includes: setup ansible-playbook test_includes.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) -pull: pull_run pull_no_127 pull_limit_inventory +pull: pull_run pull_no_127 pull_limit_inventory pull_run: ansible-pull -d $(MYTMPDIR) -U https://github.com/ansible-test-robinro/pull-integration-test.git | grep MAGICKEYWORD; \ From 6a85da7e0cf9d1570b44e2782ddeac2848c85a46 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 01:00:33 -0500 Subject: [PATCH 0918/1113] avoid private attributes hardcode adding with_ for tasks --- hacking/dump_playbook_attributes.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hacking/dump_playbook_attributes.py b/hacking/dump_playbook_attributes.py index 6a0f08f45bc..88029cd24d0 100755 --- a/hacking/dump_playbook_attributes.py +++ b/hacking/dump_playbook_attributes.py @@ -29,13 +29,11 @@ for aclass in class_list: # build ordered list to loop over and dict with attributes clist.append(name) - oblist[name] = aobj.__dict__['_attributes'] + oblist[name] = {x: aobj.__dict__['_attributes'][x] for x in aobj.__dict__['_attributes'] if 'private' not in x or not x.private} # loop is really with_ for users - if 'loop' in oblist[name]: + if name == 'Task': oblist[name]['with_<lookup_plugin>'] = True - del oblist[name]['loop'] - del oblist[name]['loop_args'] # local_action is implicit with action if 'action' in oblist[name]: From 4264df693f06219863d72fb4ef9db8ff10730970 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Thu, 10 Mar 2016 11:41:14 +0100 Subject: [PATCH 0919/1113] docsite, cloudstack: add limited VPC support Also see https://github.com/ansible/ansible-modules-extras/issues/1811 --- docsite/rst/guide_cloudstack.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_cloudstack.rst b/docsite/rst/guide_cloudstack.rst index c798b26ea12..425000f1019 100644 --- a/docsite/rst/guide_cloudstack.rst +++ b/docsite/rst/guide_cloudstack.rst @@ -23,6 +23,10 @@ You'll need this Python module installed on the execution host, usually your wor .. note:: cs also includes a command line interface for ad-hoc interaction with the CloudStack API e.g. ``$ cs listVirtualMachines state=Running``. +Limitations and Known Issues +```````````````````````````` +VPC support is not yet fully implemented and tested. The community is working on the VPC integration. + Credentials File ```````````````` You can pass credentials and the endpoint of your cloud as module arguments, however in most cases it is a far less work to store your credentials in the cloudstack.ini file. @@ -192,9 +196,9 @@ In the above play we defined 3 tasks and use the group ``cloud-vm`` as target to In the first task, we ensure we have a running VM created with the Debian template. If the VM is already created but stopped, it would just start it. If you like to change the offering on an exisiting VM, you must add ``force: yes`` to the task, which would stop the VM, change the offering and start the VM again. 
-In the second task we ensure the ports are opened if we give a public IP to the VM. +In the second task we ensure the ports are opened if we give a public IP to the VM. -In the third task we add static NAT to the VMs having a public IP defined. +In the third task we add static NAT to the VMs having a public IP defined. .. Note:: The public IP addresses must have been acquired in advance, also see ``cs_ip_address`` From 4e528e9535732b7541155b323cdab2377d74cc3a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 07:25:11 -0500 Subject: [PATCH 0920/1113] Removing explicit setting of failed/failed_when --- lib/ansible/executor/task_executor.py | 4 ---- lib/ansible/playbook/task.py | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index beceef18481..e70bf6c91f9 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -463,10 +463,6 @@ class TaskExecutor: cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result - else: - failed_when_result = False - result['failed'] = result.get('failed', False) - return failed_when_result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 8ee440386b6..bed77bd746a 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -70,11 +70,11 @@ class Task(Base, Conditional, Taggable, Become): _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int', default=0) - _changed_when = FieldAttribute(isa='list', default=[]) + _changed_when = FieldAttribute(isa='list') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') _delegate_facts = FieldAttribute(isa='bool', default=False) - _failed_when = FieldAttribute(isa='list', default=[]) + _failed_when = FieldAttribute(isa='list') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) @@ -83,7 +83,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=3) - _until = FieldAttribute(isa='list', default=[]) + _until = FieldAttribute(isa='list') def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' From 1afa369eecb43024eb98555e253b8c4064ee9a70 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 10 Mar 2016 13:55:49 +0100 Subject: [PATCH 0921/1113] Move the Github issue and pull-request templates to .github In essence, most people do not need to use the templates directly, getting them out of the way increases the signal-to-noise ratio of the root directory. Direct people to what they are looking for. 
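Looking back at patch 0910 (lib/ansible/module_utils/api.py): its docstring lists the required imports but not a full call site, so a hypothetical module snippet combining the argument-spec helpers and the two decorators might look like the following. The function name, its arguments and the numbers are illustrative only, not part of any patch:

    from ansible.module_utils.basic import *   # noqa: F403 -- import order matters, per the docstring
    from ansible.module_utils.api import *     # noqa: F403

    # merge the rate-limit and retry options into a module's own argument spec
    argument_spec = rate_limit_argument_spec(retry_argument_spec(dict(
        name=dict(required=True),
    )))

    @rate_limit(rate=10, rate_limit=60)    # at most ~10 calls per 60-second window
    @retry(retries=5, retry_pause=2.0)     # up to 5 attempts, pausing 2s between them
    def list_widgets(session, url):
        # exceptions raised here are swallowed and the call is retried until
        # the retry limit is exceeded
        return session.get(url).json()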
--- ISSUE_TEMPLATE.md => .github/ISSUE_TEMPLATE.md | 0 PULL_REQUEST_TEMPLATE.md => .github/PULL_REQUEST_TEMPLATE.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename ISSUE_TEMPLATE.md => .github/ISSUE_TEMPLATE.md (100%) rename PULL_REQUEST_TEMPLATE.md => .github/PULL_REQUEST_TEMPLATE.md (100%) diff --git a/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md similarity index 100% rename from ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE.md diff --git a/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md similarity index 100% rename from PULL_REQUEST_TEMPLATE.md rename to .github/PULL_REQUEST_TEMPLATE.md From ccf646665b1f5175f26706ebb5b5e9fff4e1d20c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 08:01:54 -0500 Subject: [PATCH 0922/1113] Revert "Removing explicit setting of failed/failed_when" This reverts commit 4e528e9535732b7541155b323cdab2377d74cc3a. --- lib/ansible/executor/task_executor.py | 4 ++++ lib/ansible/playbook/task.py | 6 +++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index e70bf6c91f9..beceef18481 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -463,6 +463,10 @@ class TaskExecutor: cond.when = self._task.failed_when failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result + else: + failed_when_result = False + result['failed'] = result.get('failed', False) + return failed_when_result if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index bed77bd746a..8ee440386b6 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -70,11 +70,11 @@ class Task(Base, Conditional, Taggable, Become): _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int', default=0) - _changed_when = FieldAttribute(isa='list') + _changed_when = FieldAttribute(isa='list', default=[]) _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') _delegate_facts = FieldAttribute(isa='bool', default=False) - _failed_when = FieldAttribute(isa='list') + _failed_when = FieldAttribute(isa='list', default=[]) _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) _loop_args = FieldAttribute(isa='list', private=True) @@ -83,7 +83,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=3) - _until = FieldAttribute(isa='list') + _until = FieldAttribute(isa='list', default=[]) def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' From 963178f392825d0699e1ab69a836aaa199fea2c7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 09:06:08 -0500 Subject: [PATCH 0923/1113] Revert "complete correction of test, key exists but is false" This reverts commit 7e6343213b9ba78db828733faf9d9c0cb8b96ece. 
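The changed_when/failed_when adjustments and reverts above (patches 0909, 0912, 0915-0917, 0920, 0922-0924) circle around one rule: a conditional only overrides what the module reported when the playbook actually supplies one. A condensed restatement of that rule, not the real TaskExecutor code, with 'evaluate' standing in for Conditional.evaluate_conditional(templar, vars):

    def apply_conditionals(result, changed_when, failed_when, evaluate):
        if changed_when:
            result['changed'] = evaluate(changed_when)
        if failed_when:
            failed = evaluate(failed_when)
            result['failed_when_result'] = result['failed'] = failed
            return failed
        # no failed_when given: keep whatever 'failed' the module itself reported
        result['failed'] = result.get('failed', False)
        return False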
--- test/integration/roles/test_failed_when/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml index 37c495e0d4e..7a0dce3b2ec 100644 --- a/test/integration/roles/test_failed_when/tasks/main.yml +++ b/test/integration/roles/test_failed_when/tasks/main.yml @@ -23,7 +23,7 @@ - assert: that: - - "'failed' in result and not result.failed" + - "'failed' in result and result.failed" - name: command rc 0 failed_when_result False shell: exit 0 From c2441c15c30d6bba948080e7ab993fbaeb1b542c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 09:08:17 -0500 Subject: [PATCH 0924/1113] Revert "corrected failed_when test" This reverts commit 140df20504617dcf5f7e4951443b9110420d4792. --- test/integration/roles/test_failed_when/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml index 7a0dce3b2ec..4a5617e1423 100644 --- a/test/integration/roles/test_failed_when/tasks/main.yml +++ b/test/integration/roles/test_failed_when/tasks/main.yml @@ -23,7 +23,7 @@ - assert: that: - - "'failed' in result and result.failed" + - "'failed' not in result" - name: command rc 0 failed_when_result False shell: exit 0 From 5cbdaff9fddc6a4bd4042edc6edfe1619514bc10 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 10:31:03 -0500 Subject: [PATCH 0925/1113] avoid creating extra tmp dirs we don't delete fixes #14867 --- lib/ansible/plugins/action/template.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index c5c98861fb9..22782b225a8 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -47,7 +47,7 @@ class ActionModule(ActionBase): return dest_stat['checksum'] - def run(self, tmp=None, task_vars=None): + def run(self, tmp='', task_vars=None): ''' handler for template operations ''' if task_vars is None: task_vars = dict() @@ -69,9 +69,6 @@ class ActionModule(ActionBase): result['msg'] = "src and dest are required" return result - if tmp is None: - tmp = self._make_tmp_path() - if faf: source = self._get_first_available_file(faf, task_vars.get('_original_file', None, 'templates')) if source is None: @@ -178,8 +175,6 @@ class ActionModule(ActionBase): if result.get('changed', False) and self._play_context.diff: result['diff'] = diff - return result - else: # when running the file module based on the template data, we do # not want the source filename (the name of the template) to be used, @@ -194,6 +189,6 @@ class ActionModule(ActionBase): follow=True, ), ) - result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)) - return result + + return result From a07495e4c8280edd7897f29fb994abfda9d2d24b Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 10:49:44 -0500 Subject: [PATCH 0926/1113] allow tests to run with parameters also fixed test_test_infra to allow tags --- test/integration/test_test_infra.yml | 6 ++++-- test/utils/run_tests.sh | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/test/integration/test_test_infra.yml b/test/integration/test_test_infra.yml index b05d0e2e3e7..b78e36d2c92 100644 --- 
a/test/integration/test_test_infra.yml +++ b/test/integration/test_test_infra.yml @@ -1,6 +1,8 @@ - hosts: testhost gather_facts: no - tasks: + tags: + - always + tasks: - fail: ignore_errors: yes register: fail_out @@ -14,7 +16,7 @@ register: assert_out - debug: - msg: assert works ({{ assert_out.failed }}) + msg: assert works ({{ assert_out.failed }}) - fail: msg: fail actually failed diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index d4787da69a0..05c110d19e7 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -10,6 +10,6 @@ if [ "${TARGET}" = "sanity" ]; then else docker build --pull=true -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} - docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c 'cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)' + docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) fi From 09e5f0578a73fa3df01780e9852894177bf03f89 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 11:02:26 -0500 Subject: [PATCH 0927/1113] Don't always insert failed in the results (again) --- lib/ansible/executor/task_executor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index beceef18481..3b8be159750 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -465,7 +465,6 @@ class TaskExecutor: result['failed_when_result'] = result['failed'] = failed_when_result else: failed_when_result = False - result['failed'] = result.get('failed', False) return failed_when_result if 'ansible_facts' in result: From 139b3981911b615c77c64894a783e3e0205f6dc4 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 11:32:09 -0500 Subject: [PATCH 0928/1113] Adding some packages to the unit test portions of travis testing Missing python-memcached and python-redis result in some tests being skipped and lowering the overall coverage % --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f90bf997b5b..8ce3f566897 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ addons: packages: - python2.4 install: - - pip install tox + - pip install tox redis memcached script: - ./test/utils/run_tests.sh after_success: From fe450290d9ead55a067b80b32cd25438576425f0 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 11:46:09 -0500 Subject: [PATCH 0929/1113] Fixing memcached->python-memcached for pip install of travis setup --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8ce3f566897..419fa7cdffe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ addons: packages: - python2.4 install: - - pip install tox redis memcached + - pip install tox redis pyton-memcached script: - ./test/utils/run_tests.sh after_success: From e52c1f26d3b4abddb9470378cd08a6ac5212b06a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 11:53:08 -0500 Subject: [PATCH 0930/1113] Fixing typo from python-memcached change in travis yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml 
b/.travis.yml index 419fa7cdffe..dde047b122d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ addons: packages: - python2.4 install: - - pip install tox redis pyton-memcached + - pip install tox redis python-memcached script: - ./test/utils/run_tests.sh after_success: From 936763bb2c31c375400487baeec12650acbfe856 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 12:02:26 -0500 Subject: [PATCH 0931/1113] Moving pip test requirements from travis.yml to tox test-requirements.txt --- .travis.yml | 2 +- test-requirements.txt | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index dde047b122d..f90bf997b5b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ addons: packages: - python2.4 install: - - pip install tox redis python-memcached + - pip install tox script: - ./test/utils/run_tests.sh after_success: diff --git a/test-requirements.txt b/test-requirements.txt index 6cc4f9fd8e4..ec176f5bf29 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,3 +8,5 @@ passlib coverage coveralls unittest2 +redis +python-memcached From f878a5d2e045b42f9ae50f70e0c730692ec118b3 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Tue, 8 Mar 2016 22:25:57 -0800 Subject: [PATCH 0932/1113] Fix unicode handling in connection plugins. --- lib/ansible/plugins/action/fetch.py | 3 ++- lib/ansible/plugins/connection/chroot.py | 2 +- lib/ansible/plugins/connection/docker.py | 14 +++++++------- lib/ansible/plugins/connection/jail.py | 6 +++--- lib/ansible/plugins/connection/libvirt_lxc.py | 6 +++--- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index 0dacd021457..7b04b526906 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -25,6 +25,7 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash from ansible.utils.path import makedirs_safe +from ansible.utils.unicode import to_bytes class ActionModule(ActionBase): @@ -158,7 +159,7 @@ class ActionModule(ActionBase): self._connection.fetch_file(source, dest) else: try: - f = open(dest, 'w') + f = open(to_bytes(dest, errors='strict'), 'w') f.write(remote_data) f.close() except (IOError, OSError) as e: diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index 0778a5e22ca..7918ac5602c 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -91,7 +91,7 @@ class Connection(ConnectionBase): local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd), host=self.chroot) - local_cmd = map(to_bytes, local_cmd) + local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 130317f24aa..b1c499ed725 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -127,7 +127,7 @@ class Connection(ConnectionBase): local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) - local_cmd = map(to_bytes, local_cmd) + local_cmd = [to_bytes(i, 
errors='strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -154,14 +154,14 @@ class Connection(ConnectionBase): display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr) out_path = self._prefix_login_path(out_path) - if not os.path.exists(in_path): + if not os.path.exists(to_bytes(in_path, errors='strict')): raise AnsibleFileNotFound( "file or module does not exist: %s" % in_path) if self.can_copy_bothways: # only docker >= 1.8.1 can do this natively args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ] - args = map(to_bytes, args) + args = [to_bytes(i, errors='strict') for i in args] p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: @@ -173,8 +173,8 @@ class Connection(ConnectionBase): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)] - args = map(to_bytes, args) - with open(in_path, 'rb') as in_file: + args = [to_bytes(i, errors='strict') for i in args] + with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: try: p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -196,7 +196,7 @@ class Connection(ConnectionBase): out_dir = os.path.dirname(out_path) args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] - args = map(to_bytes, args) + args = [to_bytes(i, errors='strict') for i in args] p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -205,7 +205,7 @@ class Connection(ConnectionBase): # Rename if needed actual_out_path = os.path.join(out_dir, os.path.basename(in_path)) if actual_out_path != out_path: - os.rename(actual_out_path, out_path) + os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict')) def close(self): """ Terminate the connection. 
Nothing to do for Docker""" diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index 2196b1bf8f5..d44213f439b 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -111,7 +111,7 @@ class Connection(ConnectionBase): local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self.jail) - local_cmd = map(to_bytes, local_cmd) + local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -154,7 +154,7 @@ class Connection(ConnectionBase): out_path = pipes.quote(self._prefix_login_path(out_path)) try: - with open(in_path, 'rb') as in_file: + with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: try: p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file) except OSError: @@ -180,7 +180,7 @@ class Connection(ConnectionBase): except OSError: raise AnsibleError("jail connection requires dd command in the jail") - with open(out_path, 'wb+') as out_file: + with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index 3bfff8b1c35..03e9771a2e5 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -91,7 +91,7 @@ class Connection(ConnectionBase): local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) - local_cmd = map(to_bytes, local_cmd) + local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -127,7 +127,7 @@ class Connection(ConnectionBase): out_path = pipes.quote(self._prefix_login_path(out_path)) try: - with open(in_path, 'rb') as in_file: + with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: try: p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file) except OSError: @@ -153,7 +153,7 @@ class Connection(ConnectionBase): except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") - with open(out_path, 'wb+') as out_file: + with open(to_bytes(out_path, errors='strict'), 'wb+') as out_file: try: chunk = p.stdout.read(BUFSIZE) while chunk: From 5c206eaf0bfb209c79251dbcf7aa979b7133ef5d Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Tue, 8 Mar 2016 22:35:08 -0800 Subject: [PATCH 0933/1113] Add additional plugins to connection tests. - docker - libvirt_lxc - jail These tests will not run as part of the build, but can be run manually. 
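The common thread in the connection-plugin fixes above (patches 0904, 0905 and 0932) is that two old idioms break on Python 3: map() is lazy, and bytes objects have no .format(), so paths and command arguments have to be converted explicitly. A standalone illustration of the working idioms, using a local stand-in for ansible.utils.unicode.to_bytes:

    import subprocess

    def to_bytes(obj, encoding='utf-8', errors='strict'):
        # local stand-in for the ansible.utils.unicode.to_bytes used in the patches above
        if isinstance(obj, bytes):
            return obj
        return obj.encode(encoding, errors)

    cmd = [u'/bin/echo', u'unicode-fö〩']
    local_cmd = [to_bytes(i, errors='strict') for i in cmd]   # a real list, not a lazy map()
    subprocess.check_call(local_cmd)

    # building an expected byte string without calling .format() on bytes:
    expected = b' '.join((b'put', to_bytes(u'/path/in'), to_bytes(u'/path/out'))) + b'\n'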
--- test/integration/test_connection.inventory | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/integration/test_connection.inventory b/test/integration/test_connection.inventory index b5edb558952..261bd7020f8 100644 --- a/test/integration/test_connection.inventory +++ b/test/integration/test_connection.inventory @@ -12,6 +12,28 @@ chroot-no-pipelining ansible_ssh_pipelining=false ansible_host=/ ansible_connection=chroot +[docker] +docker-pipelining ansible_ssh_pipelining=true +docker-no-pipelining ansible_ssh_pipelining=false +[docker:vars] +ansible_host=ubuntu-latest +ansible_connection=docker + +[libvirt_lxc] +libvirt_lxc-pipelining ansible_ssh_pipelining=true +libvirt_lxc-no-pipelining ansible_ssh_pipelining=false +[libvirt_lxc:vars] +ansible_host=lv-ubuntu-wily-amd64 +ansible_connection=libvirt_lxc + +[jail] +jail-pipelining ansible_ssh_pipelining=true +jail-no-pipelining ansible_ssh_pipelining=false +[jail:vars] +ansible_host=freebsd_10_2 +ansible_connection=jail +ansible_python_interpreter=/usr/local/bin/python + [ssh] ssh-pipelining ansible_ssh_pipelining=true ssh-no-pipelining ansible_ssh_pipelining=false @@ -27,5 +49,8 @@ ansible_host=localhost ansible_connection=paramiko_ssh [skip-during-build:children] +docker +libvirt_lxc +jail ssh paramiko_ssh From b7813fd6fd9da5f3625072ca49cbf46cd2f54918 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 12:11:52 -0500 Subject: [PATCH 0934/1113] Also adding python-systemd to tox test-requirements.txt --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index ec176f5bf29..34de42bc14a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,3 +10,4 @@ coveralls unittest2 redis python-memcached +python-systemd From 52efd7438c44c127e5687200bda2174e17086f68 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 14:06:41 -0500 Subject: [PATCH 0935/1113] Fixing template/assemble action plugins related to tmp dir use/cleanup --- lib/ansible/plugins/action/__init__.py | 4 ++-- lib/ansible/plugins/action/assemble.py | 18 ++++++++++++++---- lib/ansible/plugins/action/copy.py | 4 ++-- lib/ansible/plugins/action/template.py | 22 +++++++++++++++------- 4 files changed, 33 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 093ddd058e5..917e16dec38 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -291,7 +291,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): res = self._low_level_execute_command(cmd, sudoable=sudoable) return res - def _execute_remote_stat(self, path, all_vars, follow): + def _execute_remote_stat(self, path, all_vars, follow, tmp=None): ''' Get information from remote file. 
''' @@ -302,7 +302,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): get_checksum=True, checksum_algo='sha1', ) - mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars) + mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None)) if 'failed' in mystat and mystat['failed']: raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg'])) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 3dc0bad53f5..eeb13c21ae9 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -97,8 +97,15 @@ class ActionModule(ActionBase): result['msg'] = "src and dest are required" return result + cleanup_remote_tmp = False + if not tmp: + tmp = self._make_tmp_path() + cleanup_remote_tmp = True + if boolean(remote_src): - result.update(self._execute_module(tmp=tmp, task_vars=task_vars)) + result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False)) + if cleanup_remote_tmp: + self._remove_tmp_path(tmp) return result elif self._task._role is not None: src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) @@ -119,7 +126,7 @@ class ActionModule(ActionBase): path_checksum = checksum_s(path) dest = self._remote_expand_user(dest) - dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) + dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp) diff = {} @@ -152,11 +159,14 @@ class ActionModule(ActionBase): new_module_args.update( dict( src=xfered,)) - res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp) + res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False) if diff: res['diff'] = diff result.update(res) else: - result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)) + result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)) + + if tmp and cleanup_remote_tmp: + self._remove_tmp_path(tmp) return result diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index a833b28b160..b8094b2bd61 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -169,7 +169,7 @@ class ActionModule(ActionBase): dest_file = self._connection._shell.join_path(dest) # Attempt to get remote file info - dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow) + dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp) if dest_status['exists'] and dest_status['isdir']: # The dest is a directory. @@ -182,7 +182,7 @@ class ActionModule(ActionBase): else: # Append the relative source location to the destination and get remote stats again dest_file = self._connection._shell.join_path(dest, source_rel) - dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow) + dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp) if dest_status['exists'] and not force: # remote_file does not exist so continue to next iteration. 
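The assemble, copy and (below) template hunks in this patch all follow the same tmp-path lifecycle: create the remote tmp directory once if the caller did not pass one, reuse it for every intermediate module call with delete_remote_tmp=False, and remove it exactly once at the end if this action created it. A minimal standalone sketch of that pattern, using simplified local stand-ins rather than Ansible's real ActionBase helpers:

    import shutil
    import tempfile

    def make_tmp_path():
        # Stand-in for ActionBase._make_tmp_path(): create a working directory.
        return tempfile.mkdtemp(prefix='ansible-tmp-')

    def execute_module(name, tmp, delete_remote_tmp=True):
        # Stand-in for ActionBase._execute_module(): pretend to run a module,
        # cleaning up the tmp directory only when asked to.
        result = {'module': name, 'changed': False}
        if delete_remote_tmp:
            shutil.rmtree(tmp, ignore_errors=True)
        return result

    def run(tmp=None):
        cleanup_remote_tmp = False
        if not tmp:
            tmp = make_tmp_path()
            cleanup_remote_tmp = True

        # Intermediate calls share the tmp path and must not delete it.
        execute_module('stat', tmp, delete_remote_tmp=False)
        result = execute_module('copy', tmp, delete_remote_tmp=False)

        # Remove the tmp path exactly once, and only if this action created it.
        if tmp and cleanup_remote_tmp:
            shutil.rmtree(tmp, ignore_errors=True)
        return result

    print(run())
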
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 22782b225a8..5ddd624bde2 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -33,21 +33,21 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True - def get_checksum(self, dest, all_vars, try_directory=False, source=None): + def get_checksum(self, dest, all_vars, try_directory=False, source=None, tmp=None): try: - dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False) + dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp) if dest_stat['exists'] and dest_stat['isdir'] and try_directory and source: base = os.path.basename(source) dest = os.path.join(dest, base) - dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False) + dest_stat = self._execute_remote_stat(dest, all_vars=all_vars, follow=False, tmp=tmp) except Exception as e: return dict(failed=True, msg=to_bytes(e)) return dest_stat['checksum'] - def run(self, tmp='', task_vars=None): + def run(self, tmp=None, task_vars=None): ''' handler for template operations ''' if task_vars is None: task_vars = dict() @@ -137,8 +137,13 @@ class ActionModule(ActionBase): result['msg'] = type(e).__name__ + ": " + str(e) return result + cleanup_remote_tmp = False + if not tmp: + tmp = self._make_tmp_path() + cleanup_remote_tmp = True + local_checksum = checksum_s(resultant) - remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source) + remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source, tmp=tmp) if isinstance(remote_checksum, dict): # Error from remote_checksum is a dict. Valid return is a str result.update(remote_checksum) @@ -170,7 +175,7 @@ class ActionModule(ActionBase): follow=True, ), ) - result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)) + result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)) if result.get('changed', False) and self._play_context.diff: result['diff'] = diff @@ -189,6 +194,9 @@ class ActionModule(ActionBase): follow=True, ), ) - result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)) + result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)) + + if tmp and cleanup_remote_tmp: + self._remove_tmp_path(tmp) return result From f700a7e3ce8b8105232409fcefa6896d0acceedf Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 14:07:32 -0500 Subject: [PATCH 0936/1113] Removing forced pull from build step in run_tests.sh --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 05c110d19e7..1bea359b543 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -8,7 +8,7 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" != x'py24' ; then tox ; fi if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else - docker build --pull=true -t ansible_test/${TARGET} test/utils/docker/${TARGET} + docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > 
/tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) From 08f270ad712fc1842008c33f9e30e12768fe9bdd Mon Sep 17 00:00:00 2001 From: Devananda van der Veen <devananda.vdv@gmail.com> Date: Wed, 9 Mar 2016 17:15:11 -0800 Subject: [PATCH 0937/1113] Send empty command before recv On some switches, starting an SSH connection and immediately calling recv() will result in a connection timeout. The switch requires some input on the channel before it provides any prompt. As such, this patch sends an empty command immediately upon connection, triggering the switch to send a prompt which the shell can then interpret. Signed-off-by: Devananda van der Veen <devananda.vdv@gmail.com> --- lib/ansible/module_utils/shell.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index bcb88fa790b..5b946634576 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -109,6 +109,7 @@ class Shell(object): self.shell = self.ssh.invoke_shell() self.shell.settimeout(10) + self.shell.sendall("\n") self.receive() def strip(self, data): From cf27cf0a9e726f45f4464f8d66856bbe35145d31 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 14:12:27 -0500 Subject: [PATCH 0938/1113] Updating RELEASES/etc. for devel [ci skip] --- RELEASES.txt | 4 +++- packaging/debian/changelog | 27 +++++++++++++++++++++++++++ packaging/rpm/ansible.spec | 12 ++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/RELEASES.txt b/RELEASES.txt index cd32b0cddb0..d04845c3926 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,11 +4,13 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -2.0 "Over the Hills and Far Away" - in progress +2.1 "TBD" - in progress Released ++++++++ +2.0.1 "Over the Hills and Far Away" 02-24-2015 +2.0.0 "Over the Hills and Far Away" 01-12-2015 1.9.4 "Dancing In the Streets" 10-09-2015 1.9.3 "Dancing In the Streets" 09-03-2015 1.9.2 "Dancing In the Streets" 06-24-2015 diff --git a/packaging/debian/changelog b/packaging/debian/changelog index c09f92da390..c3fa7ffff04 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -4,6 +4,33 @@ ansible (%VERSION%-%RELEASE%~%DIST%) %DIST%; urgency=low -- Ansible, Inc. <support@ansible.com> %DATE% +ansible (2.0.1.0) unstable; urgency=low + + * 2.0.1.0 + + -- Ansible, Inc. <support@ansible.com> Wed, 24 Feb 2016 18:28:59 -0500 + + +ansible (2.0.0.2) unstable; urgency=low + + * 2.0.0.2 + + -- Ansible, Inc. <support@ansible.com> Thu, 14 Jan 2016 17:17:41 -0500 + + +ansible (2.0.0.1) unstable; urgency=low + + * 2.0.0.1 + + -- Ansible, Inc. <support@ansible.com> Tue, 12 Jan 2016 17:53:29 -0500 + + +ansible (2.0.0.0) unstable; urgency=low + + * 2.0.0.0 + + -- Ansible, Inc. <support@ansible.com> Tue, 12 Jan 2016 08:33:59 -0500 + ansible (1.9.4) unstable; urgency=low * 1.9.4 diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 75855cb65cf..cc87403ede3 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -125,6 +125,18 @@ rm -rf %{buildroot} %changelog +* Wed Feb 24 2016 Ansible, Inc. <support@ansible.com> - 2.0.1.0-1 +- Release 2.0.1.0-1 + +* Thu Jan 14 2016 Ansible, Inc. <support@ansible.com> - 2.0.0.2-1 +- Release 2.0.0.2-1 + +* Tue Jan 12 2016 Ansible, Inc. 
<support@ansible.com> - 2.0.0.1-1 +- Release 2.0.0.1-1 + +* Tue Jan 12 2016 Ansible, Inc. <support@ansible.com> - 2.0.0.0-1 +- Release 2.0.0.0-1 + * Fri Oct 09 2015 Ansible, Inc. <support@ansible.com> - 1.9.4 - Release 1.9.4 From 7e3c92e909d60fd02c1ffd933f7594e52ae2cd5a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 10 Mar 2016 15:06:47 -0500 Subject: [PATCH 0939/1113] predictable docker names and autoremove if no fail --- test/utils/run_tests.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 1bea359b543..a7b7d10ad2d 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -1,6 +1,6 @@ #!/bin/sh -xe -if [ "${TARGET}" = "sanity" ]; then +if [ "${TARGET}" == "sanity" ]; then ./test/code-smell/replace-urlopen.sh . ./test/code-smell/use-compat-six.sh lib ./test/code-smell/boilerplate.sh @@ -8,8 +8,13 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" != x'py24' ; then tox ; fi if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else + export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} - docker run -d --volume="${PWD}:/root/ansible" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} + docker run -d --volume="${PWD}:/root/ansible" --name "${C_NAME}" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) + + if [ "X${TESTS_KEEP_CONTAINER}" == "X" ]; then + docker rm "${C_NAME}" + fi fi From 16f107a49196f2afc38d204c61bce0f3ff37a4ae Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 15:05:55 -0500 Subject: [PATCH 0940/1113] Add mysql connection_timeout param to module_utils/mysql.py --- lib/ansible/module_utils/mysql.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py index 48e00adfd9c..74ae3d8c78e 100644 --- a/lib/ansible/module_utils/mysql.py +++ b/lib/ansible/module_utils/mysql.py @@ -29,7 +29,7 @@ -def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None): +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, connect_timeout=30): config = { 'host': module.params['login_host'], 'ssl': { @@ -58,6 +58,8 @@ def mysql_connect(module, login_user=None, login_password=None, config_file='', config['ssl']['ca'] = ssl_ca if db is not None: config['db'] = db + if connect_timeout is not None: + config['connect_timeout'] = connect_timeout db_connection = MySQLdb.connect(**config) if cursor_class is not None: From 20eee14a2cf10e3699a5dece0dfada550419438f Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Thu, 10 Mar 2016 12:18:00 -0800 Subject: [PATCH 0941/1113] Lower connect timeout for test_mysql_variables. 
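The connect_timeout value introduced in the mysql_connect() change above is passed straight through to MySQLdb.connect() via the config dict. A minimal sketch of that call as a caller might use it; the host, credentials and database are placeholders, and a reachable MySQL server is assumed:

    import MySQLdb

    config = {
        'host': 'db.example.com',      # placeholder
        'user': 'ansible',             # placeholder
        'passwd': 'secret',            # placeholder
        'db': 'test',                  # placeholder
        'connect_timeout': 5,          # seconds; the module defaults this to 30
    }

    db_connection = MySQLdb.connect(**config)
    cursor = db_connection.cursor()
    cursor.execute('SELECT VERSION()')
    print(cursor.fetchone())
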
--- test/integration/roles/test_mysql_variables/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/roles/test_mysql_variables/tasks/main.yml b/test/integration/roles/test_mysql_variables/tasks/main.yml index 5344e5b1b48..192472bf46f 100644 --- a/test/integration/roles/test_mysql_variables/tasks/main.yml +++ b/test/integration/roles/test_mysql_variables/tasks/main.yml @@ -194,6 +194,9 @@ #============================================================ # Verify mysql_variable fails with an incorrect login_host parameter # +- name: lower mysql connect timeout + ini_file: dest="{{ansible_env.HOME}}/.my.cnf" section=client option=connect_timeout value=5 + - name: query mysql_variable using incorrect login_host mysql_variables: variable=wait_timeout login_host=12.0.0.9 register: result From c6af811573bf159e848979b3d6458ca837c958bd Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 15:34:06 -0500 Subject: [PATCH 0942/1113] Fixing equality check in run_tests.sh for sanity --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index a7b7d10ad2d..e88d5060d76 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -1,6 +1,6 @@ #!/bin/sh -xe -if [ "${TARGET}" == "sanity" ]; then +if [ "${TARGET}" = "sanity" ]; then ./test/code-smell/replace-urlopen.sh . ./test/code-smell/use-compat-six.sh lib ./test/code-smell/boilerplate.sh From 1871f0ec164f3bca25ee0802b198c831e1194733 Mon Sep 17 00:00:00 2001 From: amesbury <alan@anarchycorp.com> Date: Thu, 10 Mar 2016 16:27:43 -0600 Subject: [PATCH 0943/1113] Fixed typo --- docsite/rst/become.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 4a5f9730153..2698b226bb9 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -7,7 +7,7 @@ Ansible can use existing privilege escalation systems to allow a user to execute Become `````` -Ansible allows you 'become' another user, different from the user that logged into the machine (remote user). This is done existing +Ansible allows you 'become' another user, different from the user that logged into the machine (remote user). This is done using existing privilege escalation tools, which you probably already use or have configured, like 'sudo', 'su', 'pfexec', 'doas', 'pbrun' and others. From ba1bcdfc17b95daf045e95a437809813e9de819c Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Thu, 10 Mar 2016 15:34:31 -0800 Subject: [PATCH 0944/1113] Add noseclabel support to libvirt_lxc plugin. --- docsite/rst/intro_configuration.rst | 11 +++++++++++ examples/ansible.cfg | 3 +++ lib/ansible/constants.py | 1 + lib/ansible/plugins/connection/libvirt_lxc.py | 7 ++++++- 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 190c0cf6be3..0bc6fbfad7b 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -952,6 +952,17 @@ The default list is: nfs,vboxsf,fuse,ramfs:: special_context_filesystems = nfs,vboxsf,fuse,ramfs,myspecialfs +libvirt_lxc_noseclabel +====================== + +.. versionadded:: 2.1 + +This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh. +This is necessary when running on systems which do not have SELinux. 
+The default behavior is no:: + + libvirt_lxc_noseclabel = True + Galaxy Settings --------------- diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 6c265e9bf28..8465ccca4bb 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -278,6 +278,9 @@ # needs to be changed to use the file system dependent context. #special_context_filesystems=nfs,vboxsf,fuse,ramfs +# Set this to yes to allow libvirt_lxc connections to work without SELinux. +#libvirt_lxc_noseclabel = yes + [colors] #higlight = white #verbose = blue diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 796073c95bc..4def61e1aa0 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -167,6 +167,7 @@ DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBL # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) +DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, boolean=True) ### PRIVILEGE ESCALATION ### # Backwards Compat diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index 03e9771a2e5..9c4d6eac27c 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -88,7 +88,12 @@ class Connection(ConnectionBase): return the process's exit code immediately. ''' executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd] + local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace'] + + if C.DEFAULT_LIBVIRT_LXC_NOSECLABEL: + local_cmd += ['--noseclabel'] + + local_cmd += [self.lxc, '--', executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] From a59fe25bb196965f7ba279de667b54730a650a7e Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Thu, 10 Mar 2016 21:15:15 -0500 Subject: [PATCH 0945/1113] bugfix for shared module shell.py removes get_cli_connection function which was left over from a refactor but was no longer in use --- lib/ansible/module_utils/shell.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index bcb88fa790b..a3abe16032e 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -183,24 +183,3 @@ class Shell(object): self._matched_prompt = match.group() return True -def get_cli_connection(module): - host = module.params['host'] - port = module.params['port'] - if not port: - port = 22 - - username = module.params['username'] - password = module.params['password'] - - try: - cli = Cli() - cli.open(host, port=port, username=username, password=password) - except paramiko.ssh_exception.AuthenticationException, exc: - module.fail_json(msg=exc.message) - except socket.error, exc: - host = '%s:%s' % (host, port) - module.fail_json(msg=exc.strerror, errno=exc.errno, host=host) - except socket.timeout: - module.fail_json(msg='socket timed out') - - return cli From ab682b2917550ffc0d4f15f49ab54ee21005b014 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 10 Mar 2016 21:52:11 -0500 Subject: [PATCH 0946/1113] Moving docker tests to pre-compiled images --- test/utils/run_tests.sh | 5 ++--- 1 file changed, 
2 insertions(+), 3 deletions(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index e88d5060d76..2186c671b56 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -9,12 +9,11 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" - docker build -t ansible_test/${TARGET} test/utils/docker/${TARGET} - docker run -d --volume="${PWD}:/root/ansible" --name "${C_NAME}" ${TARGET_OPTIONS} ansible_test/${TARGET} > /tmp/cid_${TARGET} + docker run -d --volume="${PWD}:/root/ansible" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) - if [ "X${TESTS_KEEP_CONTAINER}" == "X" ]; then + if [ "X${TESTS_KEEP_CONTAINER}" = "X" ]; then docker rm "${C_NAME}" fi fi From 336d19d3b06ad610a3abd47a5dd1156ab0588149 Mon Sep 17 00:00:00 2001 From: David Shrewsbury <shrewsbury.dave@gmail.com> Date: Fri, 11 Mar 2016 08:07:30 -0500 Subject: [PATCH 0947/1113] Set type for OpenStack 'verify' param The 'verify' param is a bool, so we don't want it to be an assumed str. --- lib/ansible/module_utils/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 6dbf04a68a7..2740b51cb94 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -77,7 +77,7 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None, type='dict', no_log=True), region_name=dict(default=None), availability_zone=dict(default=None), - verify=dict(default=True, aliases=['validate_certs']), + verify=dict(default=True, type='bool', aliases=['validate_certs']), cacert=dict(default=None), cert=dict(default=None), key=dict(default=None, no_log=True), From e613737b80affa87e468c1f68c465651a4369b1a Mon Sep 17 00:00:00 2001 From: mvgrimes <mgrimes@cpan.org> Date: Fri, 11 Mar 2016 11:14:48 -0500 Subject: [PATCH 0948/1113] Dynamically add ssl key to the mysql config hash iff needed Just including the `ssl` key in the config for MySQLdb.connect, causes it to check for the existence of SSL support. This patch only adds the key if one of the ssl configuration options is included. 
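A minimal standalone sketch of the conditional construction described above: the `ssl` sub-dict is only attached when at least one SSL option is supplied, so MySQLdb.connect() is not forced to probe for SSL support on plain connections. The option values are placeholders:

    def build_config(host, ssl_cert=None, ssl_key=None, ssl_ca=None):
        config = {'host': host}
        # Only create the 'ssl' key when an SSL option was actually given.
        if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
            config['ssl'] = {}
            if ssl_cert is not None:
                config['ssl']['cert'] = ssl_cert
            if ssl_key is not None:
                config['ssl']['key'] = ssl_key
            if ssl_ca is not None:
                config['ssl']['ca'] = ssl_ca
        return config

    print(build_config('db.example.com'))                            # no 'ssl' key
    print(build_config('db.example.com', ssl_ca='/etc/ssl/ca.pem'))  # 'ssl' present
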
--- lib/ansible/module_utils/mysql.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py index 74ae3d8c78e..693650dac6b 100644 --- a/lib/ansible/module_utils/mysql.py +++ b/lib/ansible/module_utils/mysql.py @@ -31,11 +31,12 @@ def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, connect_timeout=30): config = { - 'host': module.params['login_host'], - 'ssl': { - } + 'host': module.params['login_host'] } + if ssl_ca is not None or ssl_key is not None or ssl_cert is not None: + config['ssl'] = {} + if module.params['login_unix_socket']: config['unix_socket'] = module.params['login_unix_socket'] else: From 286d91d722d776f1df53cd3d637afeae861b272a Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 11 Mar 2016 11:25:28 -0500 Subject: [PATCH 0949/1113] Reorganizing tox stuff and making py3-specific requirements --- test/utils/run_tests.sh | 3 ++- test/utils/tox/requirements-py3.txt | 13 +++++++++++++ .../utils/tox/requirements.txt | 0 tox.ini | 8 +++++++- 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 test/utils/tox/requirements-py3.txt rename test-requirements.txt => test/utils/tox/requirements.txt (100%) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 2186c671b56..d6476f862b5 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -1,4 +1,4 @@ -#!/bin/sh -xe +#!/bin/sh -x if [ "${TARGET}" = "sanity" ]; then ./test/code-smell/replace-urlopen.sh . @@ -8,6 +8,7 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" != x'py24' ; then tox ; fi if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else + set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" docker run -d --volume="${PWD}:/root/ansible" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . 
hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" diff --git a/test/utils/tox/requirements-py3.txt b/test/utils/tox/requirements-py3.txt new file mode 100644 index 00000000000..1ff4fb0cb26 --- /dev/null +++ b/test/utils/tox/requirements-py3.txt @@ -0,0 +1,13 @@ +# +# Test requirements +# + +nose +mock >= 1.0.1, < 1.1 +passlib +coverage +coveralls +unittest2 +redis +python3-memcached +python-systemd diff --git a/test-requirements.txt b/test/utils/tox/requirements.txt similarity index 100% rename from test-requirements.txt rename to test/utils/tox/requirements.txt diff --git a/tox.ini b/tox.ini index a061218f3c0..f36f5841c58 100644 --- a/tox.ini +++ b/tox.ini @@ -1,8 +1,14 @@ [tox] envlist = py26,py27,py34,py35 +[testenv:py34] +deps = -r{toxinidir}/test/utils/tox/requirements-py3.txt + +[testenv:py35] +deps = -r{toxinidir}/test/utils/tox/requirements-py3.txt + [testenv] -deps = -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test/utils/tox/requirements.txt whitelist_externals = make commands = python --version From 250b6c0f3552f7f4bac4a9480c45f8d05f7c6e64 Mon Sep 17 00:00:00 2001 From: Robyn Bergeron <robyn.bergeron@gmail.com> Date: Fri, 11 Mar 2016 11:41:53 -0700 Subject: [PATCH 0950/1113] update proposals proposal update proposals proposal with updates discussed in last week's meeting. will move this and other related materials to the new repo after this merges. --- docs/proposals/proposals_process_proposal.MD | 27 +++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/docs/proposals/proposals_process_proposal.MD b/docs/proposals/proposals_process_proposal.MD index f8ccc9ad79d..eb83eeb1b99 100644 --- a/docs/proposals/proposals_process_proposal.MD +++ b/docs/proposals/proposals_process_proposal.MD @@ -33,17 +33,20 @@ Once the process and template are approved, a PR will be submitted for documenti ### Proposed Process 1: PROPOSAL CREATION -- Person making the proposal creates the proposal document in ansible/docs/proposals via PR, following the proposal template. -- Author of proposal PR updates the proposal with the PR # / link. +- Person making the proposal creates the proposal document in ansible/proposals via PR, following the proposal template/ +- Person making the proposal creates an issue in ansible/proposals for that proposal. +- Author of proposal PR updates the proposal with link to the created issue #. - Notify the community that this proposal exists. -- Author notifies ansible-devel mailing list for transparency, providing link to PR. -- Author includes commentary indicating that comments should *not* be in response to this email, but rather, community members should add comments or feedback to the pull request. +- Author notifies ansible-devel mailing list for transparency, providing link to issue. +- Author includes commentary indicating that comments should *not* be in response to this email, but rather, community members should add comments or feedback in the issue. +- PRs may be made to the proposal, and can merged or not at submitter's discretion, and should be discussed/linked in the issue. 2: KEEP THE PROPOSAL MOVING TOWARDS A DECISION. +- Create tags in the ansible/proposals repo to indicate progress of the various proposal issues; ie: Discussion, Ready for meeting, Approved. (Can be used in conjunction with a board on waffle.io to show this, kanban style.) - Proposals use public meetings as a mechanism to keep them moving. 
- All proposals are decided on in a public meeting by a combination of folks with commit access to Ansible and any interested parties / users, as well as the author of the proposal. Time for approvals will be a portion of the overall schedule; proposals will be reviewed in the order received and may occasionally be deferred to the next meeting. If we are overwhelmed, a separate meeting may be scheduled. -(Note: ample feedback in the comments of the proposal PR should allow for folks to come to broad consensus in one way or another in the meeting rather rapidly, generally without an actual counted vote. However, the decision should be made *in the meeting*, so as to avoid any questions around whether or not the approval of one Ansible maintain / committer reflects the opinions or decision of everyone.) +(Note: ample feedback in the comments of the proposal issue should allow for folks to come to broad consensus in one way or another in the meeting rather rapidly, generally without an actual counted vote. However, the decision should be made *in the meeting*, so as to avoid any questions around whether or not the approval of one Ansible maintain / committer reflects the opinions or decision of everyone.) - *New* proposals are explicitly added to the public IRC meeting agenda for each week by the meeting organizer for for acknowledgement of ongoing discussion and existence, and/or easy approval/rejection. (Either via a separate issue somewhere tracking any meeting items, or by adding a “meeting” label to the PR.) - Existing new, not-yet-approved proposals are reviewed weekly by meeting organizer to check for slow-moving/stalled proposals, or for flags from the proposal owner indicating that they'd like to have it addressed in the weeks meeting @@ -51,9 +54,9 @@ Once the process and template are approved, a PR will be submitted for documenti 3: PROPOSAL APPROVED - Amendments needed to the proposal after IRC discussion should be made immediately. - The proposal status should be changed to Approved / In Progress in the document. -- The proposal should be moved from /docs/proposals to a docs/roadmap folder (or similar). -- The proposal PR comments should be updated with a note by the meeting organizer that the proposal has been accepted, and further commentary should be in the PRs implementing the code itself. -- Proposals can also be PENDING (waiting on something), or DECLINED. +- The proposal should be moved from /ansible/proposals to a roadmap folder (or similar). +- The proposal issue comments should be updated with a note by the meeting organizer that the proposal has been accepted, and further commentary should be in the PRs implementing the code itself. +- Proposals can also be PENDING or NEEDS INFO (waiting on something), or DECLINED. 4: CODE IN PROGRESS - Approved proposals should be periodically checked for progress, especially if tied to a release and/or is noted as release blocking. @@ -93,9 +96,9 @@ Other Suggested things to include: - Approval of this proposed process is needed to create the actual documentation of the process. - Weekly, public IRC meetings (which should probably be documented Wrt time / day of week / etc. in the contributor documentation) of the Ansible development community. -- Creation of a “meeting” label in GitHub (or defining some other mechanism to gather items for a weekly meeting agenda, such as a separate issue in GitHub that links to the PRs.) 
+- Creation of appropriate labels in GitHub (or defining some other mechanism to gather items for a weekly meeting agenda, such as a separate issue in GitHub that links to the PRs.) - Coming to an agreement regarding “what qualifies as a feature or enhancement that requires a proposal, vs. just submitting a PR with code.” It could simply be that if the change is large or very complicated, our recommendation is always to file a proposal to ensure (a) transparency (b) that a contributor doesn’t waste their time on something that ultimately can’t be merged at this time. -- Nice to have: Any new proposal PR landing in docs/proposals is automatically merged and an email automatically notifies the mailing list of the existence and location of the PR for comments. +- Nice to have: Any new proposal PR landing in ansible/proposals is automatically merged and an email automatically notifies the mailing list of the existence and location of the proposal & related issue # for comments. ## Testing @@ -103,5 +106,5 @@ Testing of this proposal will literally be via submitting this proposal through ## Documentation: -- Documentation of the process, including “what is a feature or enhancement vs. just a regular PR,” along with the steps shown above, will be added to the Ansible documentation in .rst format via PR. The documentation should also provide guidance on the standard wording of the email notifying ansible-devel list that the proposal exists and is ready for review in the PR comments. -- A proposal template should also be created in the docs/proposals repo directory. +- Documentation of the process, including “what is a feature or enhancement vs. just a regular PR,” along with the steps shown above, will be added to the Ansible documentation in .rst format via PR. The documentation should also provide guidance on the standard wording of the email notifying ansible-devel list that the proposal exists and is ready for review in the issue comments. +- A proposal template should also be created in the ansible/proposals repo directory. From 1e85abfcf26b7c8cac8c5eb9894e5f5db601fe78 Mon Sep 17 00:00:00 2001 From: Jason McKerr <jmckerr@jmckerr-OSX.local> Date: Fri, 11 Mar 2016 14:11:06 -0500 Subject: [PATCH 0951/1113] updates based on feedback from docschick and bcoca. --- docsite/rst/committer_guidelines.rst | 32 ++++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/docsite/rst/committer_guidelines.rst b/docsite/rst/committer_guidelines.rst index 13fd530dc68..9841d3b76ae 100644 --- a/docsite/rst/committer_guidelines.rst +++ b/docsite/rst/committer_guidelines.rst @@ -7,23 +7,23 @@ These guidelines apply to everyone. At the same time, this ISN’T a process doc That said, use the trust wisely. -If you abuse the trust and break components and builds, or waste a lot of time asking people to review incomplete or untested pull requests, the trust level falls and you may be asked not to commit or you may lose access to do so. +If you abuse the trust and break components and builds, etc., the trust level falls and you may be asked not to commit or you may lose access to do so. Features, High Level Design, and Roadmap ======================================== -As a core team member you will be part of the team that actually develops the roadmap! So be engaged and push for what you want. However, Red Hat as a company will commit to certain features, fixes, APIs, etc. for various releases. The company and the Ansible team still has to get those done and out the door. 
Obligations to users, the community, and customers come first. Because of that, a feature you may want to develop yourself may not get into a release if it impacts a lot of other parts of Ansible. +As a core team member, you are an integral part of the team that develops the roadmap. Please be engaged, and push for the features and fixes that you want to see. Also keep in mind that Red Hat, as a company, will commit to certain features, fixes, APIs, etc. for various releases. Red Hat, the company, and the Ansible team must get these committed features (etc.) completed and released as scheduled. Obligations to users, the community, and customers must come first. Because of thse commitments, a feature you want to develop yourself many not get into a release if it impacts a lot of other parts within Ansible. -Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team will have sole responsibility for merging new features based on proposals. +Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team has sole responsibility for merging new features based on proposals. Our Workflow on GitHub ====================== As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you’re aware of the following workflow steps: -* Fork the repository upon which you want to do some work +* Fork the repository upon which you want to do some work to your own personal repository * Work on the specific branch upon which you need to commit -* Create a Pull Request and tag the people you would like to review; assign someone as the primary “owner” of your request +* Create a Pull Request back to the Ansible repository and tag the people you would like to review; assign someone as the primary “owner” of your request * Adjust code as necessary based on the Comments provided * Ask someone on the Core Team to do a final review and merge @@ -39,26 +39,26 @@ Roles on Core General Rules ============= -Individuals with direct commit access to ansible/ansible (+core, + extras) are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than a list of what you *can* do, this is a list of what you *should not* do and, in lieu of anything else, individuals with this power are expected to use their best judgement. +Individuals with direct commit access to ansible/ansible (+core, + extras) are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*, individuals with this power are expected to use their best judgement. * Don’t commit directly. -* PRs that have tests will be looked at with more priority than PRs without tests. Of course not all changes require tests, but for bug fixes or functionality changes, please add tests. -* Documentation. If your PR is new feature or a change to behavior, make sure you’ve updated associated documentation or notified the right people to do so. It also helps to add the version of Core against which this documentation is compatible (to avoid confusion with stable versus devel docs, for backwards compatibility, etc.). -* Someone else should merge your pull requests. 
If you are a Core Committer you have leeway here for minor changes. -* After a merge clean up dead forks/branches. Don’t leave a mess hanging around. +* Don't omit tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes. +* Don't forget the docs! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so. It also helps to add the version of Core against which this documentation is compatible (to avoid confusion with stable versus devel docs, for backwards compatibility, etc.). +* Don't merge your own PRs. Someone else should have a chance to review and approve the PR merge. If you are a Core Committer, you have a small amount of leeway here for very minor changes. * Consider backwards compatibility (don’t break existing playbooks). -* Consider alternate environments (yes, people have bad environments, but they are the ones that need us the most). -* Always discuss the technical merits, never address the person’s limitations (you can later go for beers and call them idiots, but not in IRC/Github/etc). -* Consider the maintenance burden, some things are cool to have, but might not be worth shoehorning in. -* Complexity breeds all kinds of problems, so keep it simple. -* Lastly, comitters that have no activity on the project (merges, triage, commits, etc) will have permissions suspended. +* Don't forget about alternate environments. Consider the alternatives--yes, people have bad environments, but they are the ones who need us the most. +* Don't drag your community team members down. Always discuss the technical merits, but you should never address the person’s limitations (you can later go for beers and call them idiots, but not in IRC/Github/etc.). +* Don't forget about the maintenance burden. Some things are really cool to have, but they might not be worth shoehorning in if the maintenance burden is too great. +* Don't break playbooks. Always keep backwards compatibility in mind. +* Don't forget to keep it simple. Complexity breeds all kinds of problems. +* Don't forget to be active. Committers who have no activity on the project (through merges, triage, commits, etc.) will have their permissions suspended. Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community. People ====== -Individuals who have been asked to become part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names & github IDs to this file below via pull request, indicating that they agree to act in the ways that their fellow committers trust that they will act. +Individuals who've been asked to become a part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, via a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act. 
* James Cammarata (RedHat/Ansible) * Brian Coca (RedHat/Ansible) From 6c7ffbfbcd17eccdcb7242bcfb4bf39f6f0c7d84 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Sat, 12 Mar 2016 01:37:43 +0100 Subject: [PATCH 0952/1113] Reformat new module lists Make the Changelog easier to parse, especially the list of new modules --- CHANGELOG.md | 846 +++++++++++++++++++++++++++------------------------ 1 file changed, 454 insertions(+), 392 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b961501b79e..3a7ac769684 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,19 +9,22 @@ Ansible Changes By Release * added ansible-console tool, a REPL shell that allows running adhoc tasks against a chosen inventory (based on https://github.com/dominis/ansible-shell ) ####New Modules: -* aws: ec2_vol_facts -* aws: ec2_vpc_dhcp_options -* aws: ec2_vpc_net_facts -* cloudstack: cs_volume -* cloudstack: cs_configuration -* cloudstack: cs_resourcelimit -* cloudstack: cs_instance_facts -* cloudstack: cs_pod -* cloudstack: cs_cluster -* cloudstack: cs_zone -* win_regmerge -* win_timezone -* yum_repository +- aws + * ec2_vol_facts + * ec2_vpc_dhcp_options + * ec2_vpc_net_facts +- cloudstack + * cs_cluster + * cs_configuration + * cs_instance_facts + * cs_pod + * cs_resourcelimit + * cs_volume + * cs_zone +- windows + * win_regmerge + * win_timezone +- yum_repository ####New Filters: @@ -194,202 +197,219 @@ allowed in future versions: ####New Modules: -* amazon: ec2_ami_copy -* amazon: ec2_ami_find -* amazon: ec2_elb_facts -* amazon: ec2_eni -* amazon: ec2_eni_facts -* amazon: ec2_remote_facts -* amazon: ec2_vpc_igw -* amazon: ec2_vpc_net -* amazon: ec2_vpc_net_facts -* amazon: ec2_vpc_route_table -* amazon: ec2_vpc_route_table_facts -* amazon: ec2_vpc_subnet -* amazon: ec2_vpc_subnet_facts -* amazon: ec2_win_password -* amazon: ecs_cluster -* amazon: ecs_task -* amazon: ecs_taskdefinition -* amazon: elasticache_subnet_group_facts -* amazon: iam -* amazon: iam_cert -* amazon: iam_policy -* amazon: route53_facts -* amazon: route53_health_check -* amazon: route53_zone -* amazon: sts_assume_role -* amazon: s3_bucket -* amazon: s3_lifecycle -* amazon: s3_logging -* amazon: sqs_queue -* amazon: sns_topic -* amazon: sts_assume_role -* apk -* bigip_gtm_wide_ip -* bundler -* centurylink: clc_aa_policy -* centurylink: clc_alert_policy -* centurylink: clc_blueprint_package -* centurylink: clc_firewall_policy -* centurylink: clc_group -* centurylink: clc_loadbalancer -* centurylink: clc_modify_server -* centurylink: clc_publicip -* centurylink: clc_server -* centurylink: clc_server_snapshot -* circonus_annotation -* consul -* consul_acl -* consul_kv -* consul_session -* cloudtrail -* cloudstack: cs_account -* cloudstack: cs_affinitygroup -* cloudstack: cs_domain -* cloudstack: cs_facts -* cloudstack: cs_firewall -* cloudstack: cs_iso -* cloudstack: cs_instance -* cloudstack: cs_instancegroup -* cloudstack: cs_ip_address -* cloudstack: cs_loadbalancer_rule -* cloudstack: cs_loadbalancer_rule_member -* cloudstack: cs_network -* cloudstack: cs_portforward -* cloudstack: cs_project -* cloudstack: cs_sshkeypair -* cloudstack: cs_securitygroup -* cloudstack: cs_securitygroup_rule -* cloudstack: cs_staticnat -* cloudstack: cs_template -* cloudstack: cs_user -* cloudstack: cs_vmsnapshot -* cronvar -* datadog_monitor -* deploy_helper -* docker: docker_login -* dpkg_selections -* elasticsearch_plugin -* expect -* find -* google: gce_tag -* hall -* ipify_facts -* iptables -* libvirt: virt_net -* libvirt: virt_pool -* 
maven_artifact -* openstack: os_auth -* openstack: os_client_config -* openstack: os_image -* openstack: os_image_facts -* openstack: os_floating_ip -* openstack: os_ironic -* openstack: os_ironic_node -* openstack: os_keypair -* openstack: os_network -* openstack: os_network_facts -* openstack: os_nova_flavor -* openstack: os_object -* openstack: os_port -* openstack: os_project -* openstack: os_router -* openstack: os_security_group -* openstack: os_security_group_rule -* openstack: os_server -* openstack: os_server_actions -* openstack: os_server_facts -* openstack: os_server_volume -* openstack: os_subnet -* openstack: os_subnet_facts -* openstack: os_user -* openstack: os_user_group -* openstack: os_volume -* openvswitch_db. -* osx_defaults -* pagerduty_alert -* pam_limits -* pear -* profitbricks: profitbricks -* profitbricks: profitbricks_datacenter -* profitbricks: profitbricks_nic -* profitbricks: profitbricks_volume -* profitbricks: profitbricks_volume_attachments -* profitbricks: profitbricks_snapshot -* proxmox: proxmox -* proxmox: proxmox_template -* puppet -* pushover -* pushbullet -* rax: rax_clb_ssl -* rax: rax_mon_alarm -* rax: rax_mon_check -* rax: rax_mon_entity -* rax: rax_mon_notification -* rax: rax_mon_notification_plan -* rabbitmq_binding -* rabbitmq_exchange -* rabbitmq_queue -* selinux_permissive -* sendgrid -* sensu_check -* sensu_subscription -* seport -* slackpkg -* solaris_zone -* taiga_issue -* vertica_configuration -* vertica_facts -* vertica_role -* vertica_schema -* vertica_user -* vmware: vca_fw -* vmware: vca_nat -* vmware: vmware_cluster -* vmware: vmware_datacenter -* vmware: vmware_dns_config -* vmware: vmware_dvs_host -* vmware: vmware_dvs_portgroup -* vmware: vmware_dvswitch -* vmware: vmware_host -* vmware: vmware_migrate_vmk -* vmware: vmware_portgroup -* vmware: vmware_target_canonical_facts -* vmware: vmware_vm_facts -* vmware: vmware_vm_vss_dvs_migrate -* vmware: vmware_vmkernel -* vmware: vmware_vmkernel_ip_config -* vmware: vmware_vsan_cluster -* vmware: vmware_vswitch -* vmware: vsphere_copy -* webfaction_app -* webfaction_db -* webfaction_domain -* webfaction_mailbox -* webfaction_site -* win_acl -* win_dotnet_ngen -* win_environment -* win_firewall_rule -* win_iis_virtualdirectory -* win_iis_webapplication -* win_iis_webapppool -* win_iis_webbinding -* win_iis_website -* win_lineinfile -* win_nssm -* win_package -* win_regedit -* win_scheduled_task -* win_unzip -* win_updates -* win_webpicmd -* xenserver_facts -* zabbix_host -* zabbix_hostmacro -* zabbix_screen -* znode +- amazon + * ec2_ami_copy + * ec2_ami_find + * ec2_elb_facts + * ec2_eni + * ec2_eni_facts + * ec2_remote_facts + * ec2_vpc_igw + * ec2_vpc_net + * ec2_vpc_net_facts + * ec2_vpc_route_table + * ec2_vpc_route_table_facts + * ec2_vpc_subnet + * ec2_vpc_subnet_facts + * ec2_win_password + * ecs_cluster + * ecs_task + * ecs_taskdefinition + * elasticache_subnet_group_facts + * iam + * iam_cert + * iam_policy + * route53_facts + * route53_health_check + * route53_zone + * s3_bucket + * s3_lifecycle + * s3_logging + * sns_topic + * sqs_queue + * sts_assume_role +- apk +- bigip_gtm_wide_ip +- bundler +- centurylink + * clc_aa_policy + * clc_alert_policy + * clc_blueprint_package + * clc_firewall_policy + * clc_group + * clc_loadbalancer + * clc_modify_server + * clc_publicip + * clc_server + * clc_server_snapshot +- circonus_annotation +- consul + * consul + * consul_acl + * consul_kv + * consul_session +- cloudtrail +- cloudstack + * cs_account + * cs_affinitygroup + * cs_domain 
+ * cs_facts + * cs_firewall + * cs_iso + * cs_instance + * cs_instancegroup + * cs_ip_address + * cs_loadbalancer_rule + * cs_loadbalancer_rule_member + * cs_network + * cs_portforward + * cs_project + * cs_securitygroup + * cs_securitygroup_rule + * cs_sshkeypair + * cs_staticnat + * cs_template + * cs_user + * cs_vmsnapshot +- cronvar +- datadog_monitor +- deploy_helper +- docker + * docker_login +- dpkg_selections +- elasticsearch_plugin +- expect +- find +- google + * gce_tag +- hall +- ipify_facts +- iptables +- libvirt + * virt_net + * virt_pool +- maven_artifact +- openstack + * os_auth + * os_client_config + * os_image + * os_image_facts + * os_floating_ip + * os_ironic + * os_ironic_node + * os_keypair + * os_network + * os_network_facts + * os_nova_flavor + * os_object + * os_port + * os_project + * os_router + * os_security_group + * os_security_group_rule + * os_server + * os_server_actions + * os_server_facts + * os_server_volume + * os_subnet + * os_subnet_facts + * os_user + * os_user_group + * os_volume +- openvswitch_db +- osx_defaults +- pagerduty_alert +- pam_limits +- pear +- profitbricks + * profitbricks + * profitbricks_datacenter + * profitbricks_nic + * profitbricks_snapshot + * profitbricks_volume + * profitbricks_volume_attachments +- proxmox + * proxmox + * proxmox_template +- puppet +- pushover +- pushbullet +- rax + * rax_clb_ssl + * rax_mon_alarm + * rax_mon_check + * rax_mon_entity + * rax_mon_notification + * rax_mon_notification_plan +- rabbitmq + * rabbitmq_binding + * rabbitmq_exchange + * rabbitmq_queue +- selinux_permissive +- sendgrid +- sensu + * sensu_check + * sensu_subscription +- seport +- slackpkg +- solaris_zone +- taiga_issue +- vertica + * vertica_configuration + * vertica_facts + * vertica_role + * vertica_schema + * vertica_user +- vmware + * vca_fw + * vca_nat + * vmware_cluster + * vmware_datacenter + * vmware_dns_config + * vmware_dvs_host + * vmware_dvs_portgroup + * vmware_dvswitch + * vmware_host + * vmware_migrate_vmk + * vmware_portgroup + * vmware_target_canonical_facts + * vmware_vm_facts + * vmware_vm_vss_dvs_migrate + * vmware_vmkernel + * vmware_vmkernel_ip_config + * vmware_vsan_cluster + * vmware_vswitch + * vsphere_copy +- webfaction + * webfaction_app + * webfaction_db + * webfaction_domain + * webfaction_mailbox + * webfaction_site +- windows + * win_acl + * win_dotnet_ngen + * win_environment + * win_firewall_rule + * win_iis_virtualdirectory + * win_iis_webapplication + * win_iis_webapppool + * win_iis_webbinding + * win_iis_website + * win_lineinfile + * win_nssm + * win_package + * win_regedit + * win_scheduled_task + * win_unzip + * win_updates + * win_webpicmd +- xenserver_facts +- zabbbix + * zabbix_host + * zabbix_hostmacro + * zabbix_screen +- znode ####New Inventory scripts: @@ -562,19 +582,19 @@ Major changes: New Modules: -* cryptab: manages linux encrypted block devices -* gce_img: for utilizing GCE image resources -* gluster_volume: manage glusterfs volumes -* haproxy: for the load balancer of same name -* known_hosts: manages the ssh known_hosts file -* lxc_container: manage lxc containers -* patch: allows for patching files on target systems -* pkg5: installing and uninstalling packages on Solaris -* pkg5_publisher: manages Solaris pkg5 repository configuration -* postgresql_ext: manage postgresql extensions -* snmp_facts: gather facts via snmp -* svc: manages daemontools based services -* uptimerobot: manage monitoring with this service +* cryptab *-- manages linux encrypted block devices* +* gce_img *-- 
for utilizing GCE image resources* +* gluster_volume *-- manage glusterfs volumes* +* haproxy *-- for the load balancer of same name* +* known_hosts *-- manages the ssh known_hosts file* +* lxc_container *-- manage lxc containers* +* patch *-- allows for patching files on target systems* +* pkg5 *-- installing and uninstalling packages on Solaris* +* pkg5_publisher *-- manages Solaris pkg5 repository configuration* +* postgresql_ext *-- manage postgresql extensions* +* snmp_facts *-- gather facts via snmp* +* svc *-- manages daemontools based services* +* uptimerobot *-- manage monitoring with this service* New Filters: @@ -710,15 +730,19 @@ Major changes: New Modules: -* cloud: rax_cdb - manages Rackspace Cloud Database instances -* cloud: rax_cdb_database - manages Rackspace Cloud Databases -* cloud: rax_cdb_user - manages Rackspace Cloud Database users -* monitoring: zabbix_maintaince - handles outage windows with Zabbix -* monitoring: bigpanda - support for bigpanda -* net_infrastructure: a10_server - manages server objects on A10 devices -* net_infrastructure: a10_service_group - manages service group objects on A10 devices -* net_infrastructure: a10_virtual_server - manages virtual server objects on A10 devices -* system: getent - read getent databases +- cloud + * rax_cdb *-- manages Rackspace Cloud Database instances* + * rax_cdb_database *-- manages Rackspace Cloud Databases* + * rax_cdb_user *-- manages Rackspace Cloud Database users* +- monitoring + * bigpanda *-- support for bigpanda* + * zabbix_maintaince *-- handles outage windows with Zabbix* +- net_infrastructure + * a10_server *-- manages server objects on A10 devices* + * a10_service_group *-- manages service group objects on A10 devices* + * a10_virtual_server *-- manages virtual server objects on A10 devices* +- system + * getent *-- read getent databases* Some other notable changes: @@ -797,19 +821,21 @@ New inventory scripts: New Modules: -* cloud: azure -* cloud: rax_meta -* cloud: rax_scaling_group -* cloud: rax_scaling_policy -* windows: version of setup module -* windows: version of slurp module -* windows: win_feature -* windows: win_get_url -* windows: win_msi -* windows: win_ping -* windows: win_user -* windows: win_service -* windows: win_group +- cloud + * azure + * rax_meta + * rax_scaling_group + * rax_scaling_policy +- windows + * *version of setup module* + * *version of slurp module* + * win_feature + * win_get_url + * win_group + * win_msi + * win_ping + * win_service + * win_user Other notable changes: @@ -892,40 +918,48 @@ Major features/changes: New Modules: -* files: replace -* packaging: cpanm (Perl) -* packaging: portage -* packaging: composer (PHP) -* packaging: homebrew_tap (OS X) -* packaging: homebrew_cask (OS X) -* packaging: apt_rpm -* packaging: layman -* monitoring: logentries -* monitoring: rollbar_deployment -* monitoring: librato_annotation -* notification: nexmo (SMS) -* notification: twilio (SMS) -* notification: slack (Slack.com) -* notification: typetalk (Typetalk.in) -* notification: sns (Amazon) -* system: debconf -* system: ufw -* system: locale_gen -* system: alternatives -* system: capabilities -* net_infrastructure: bigip_facts -* net_infrastructure: dnssimple -* net_infrastructure: lldp -* web_infrastructure: apache2_module -* cloud: digital_ocean_domain -* cloud: digital_ocean_sshkey -* cloud: rax_identity -* cloud: rax_cbs (cloud block storage) -* cloud: rax_cbs_attachments -* cloud: ec2_asg (configure autoscaling groups) -* cloud: ec2_scaling_policy -* cloud: 
ec2_metric_alarm -* cloud: vsphere_guest +- files + * replace +- packaging + * apt_rpm + * composer *(PHP)* + * cpanm *(Perl)* + * homebrew_cask *(OS X)* + * homebrew_tap *(OS X)* + * layman + * portage +- monitoring + * librato_annotation + * logentries + * rollbar_deployment +- notification + * nexmo *(SMS)* + * slack *(Slack.com)* + * sns *(Amazon)* + * twilio *(SMS)* + * typetalk *(Typetalk.in)* +- system + * alternatives + * capabilities + * debconf + * locale_gen + * ufw +- net_infrastructure + * bigip_facts + * dnssimple + * lldp +- web_infrastructure + * apache2_module +- cloud + * digital_ocean_domain + * digital_ocean_sshkey + * ec2_asg *(configure autoscaling groups)* + * ec2_metric_alarm + * ec2_scaling_policy + * rax_identity + * rax_cbs *(cloud block storage)* + * rax_cbs_attachments + * vsphere_guest Other notable changes: @@ -948,7 +982,6 @@ Other notable changes: * the get_url module now accepts url_username and url_password as parameters, so sites which require authentication no longer need to have them embedded in the url * ... to be filled in from changelogs ... -* ## 1.5.5 "Love Walks In" - April 18, 2014 @@ -999,19 +1032,23 @@ Major features/changes: New modules: -* cloud: ec2_elb_lb -* cloud: ec2_key -* cloud: ec2_snapshot -* cloud: rax_dns -* cloud: rax_dns_record -* cloud: rax_files -* cloud: rax_files_objects -* cloud: rax_keypair -* cloud: rax_queue -* cloud: docker_image -* messaging: rabbitmq_policy -* system: at -* utilities: assert +- cloud + * docker_image + * ec2_elb_lb + * ec2_key + * ec2_snapshot + * rax_dns + * rax_dns_record + * rax_files + * rax_files_objects + * rax_keypair + * rax_queue +- messaging + * rabbitmq_policy +- system + * at +- utilities + * assert Other notable changes (many new module params & bugfixes may not be listed): @@ -1090,37 +1127,46 @@ Highlighted new features: New modules and plugins. -* cloud: ec2_eip -- manage AWS elastic IPs -* cloud: ec2_vpc -- manage ec2 virtual private clouds -* cloud: elasticcache -- Manages clusters in Amazon Elasticache -* cloud: rax_network -- sets up Rackspace networks -* cloud: rax_facts: retrieve facts about a Rackspace Cloud Server -* cloud: rax_clb_nodes -- manage Rackspace cloud load balanced nodes -* cloud: rax_clb -- manages Rackspace cloud load balancers -* cloud: docker - instantiates/removes/manages docker containers -* cloud: ovirt -- VM lifecycle controls for ovirt -* files: acl -- set or get acls on a file -* files: unarchive: pushes and extracts tarballs -* files: synchronize: a useful wraper around rsyncing trees of files -* system: firewalld -- manage the firewalld configuration -* system: modprobe -- manage kernel modules on systems that support modprobe/rmmod -* system: open_iscsi -- manage targets on an initiator using open-iscsi -* system: blacklist: add or remove modules from the kernel blacklist -* system: hostname - sets the systems hostname -* utilities: include_vars -- dynamically load variables based on conditions. 
-* packaging: zypper_repository - adds or removes Zypper repositories -* packaging: urpmi - work with urpmi packages -* packaging: swdepot - a module for working with swdepot -* notification: grove - notifies to Grove hosted IRC channels -* web_infrastructure: ejabberd_user: add and remove users to ejabberd -* web_infrastructure: jboss: deploys or undeploys apps to jboss -* source_control: github_hooks: manages GitHub service hooks -* net_infrastructure: bigip_monitor_http: manages F5 BIG-IP LTM http monitors -* net_infrastructure: bigip_monitor_tcp: manages F5 BIG-IP LTM TCP monitors -* net_infrastructure: bigip_pool_member: manages F5 BIG-IP LTM pool members -* net_infrastructure: bigip_node: manages F5 BIG-IP LTM nodes -* net_infrastructure: openvswitch_port -* net_infrastructure: openvswitch_bridge +- cloud + * docker *- instantiates/removes/manages docker containers* + * ec2_eip *-- manage AWS elastic IPs* + * ec2_vpc *-- manage ec2 virtual private clouds* + * elasticcache *-- Manages clusters in Amazon Elasticache* + * ovirt *-- VM lifecycle controls for ovirt* + * rax_network *-- sets up Rackspace networks* + * rax_facts *-- retrieve facts about a Rackspace Cloud Server* + * rax_clb_nodes *-- manage Rackspace cloud load balanced nodes* + * rax_clb *-- manages Rackspace cloud load balancers* +- files + * acl *-- set or get acls on a file* + * synchronize *-- a useful wraper around rsyncing trees of files* + * unarchive *-- pushes and extracts tarballs* +- system + * blacklist *-- add or remove modules from the kernel blacklist* + * firewalld *-- manage the firewalld configuration* + * hostname *-- sets the systems hostname* + * modprobe *-- manage kernel modules on systems that support modprobe/rmmod* + * open_iscsi *-- manage targets on an initiator using open-iscsi* +- utilities + * include_vars *-- dynamically load variables based on conditions.* +- packaging + * swdepot *-- a module for working with swdepot* + * urpmi *-- work with urpmi packages* + * zypper_repository *-- adds or removes Zypper repositories* +- notification + * grove *-- notifies to Grove hosted IRC channels* +- web_infrastructure + * ejabberd_user *-- add and remove users to ejabberd* + * jboss *-- deploys or undeploys apps to jboss* +- source_control + * github_hooks *-- manages GitHub service hooks* +- net_infrastructure + * bigip_monitor_http *-- manages F5 BIG-IP LTM http monitors* + * bigip_monitor_tcp *-- manages F5 BIG-IP LTM TCP monitors* + * bigip_node *-- manages F5 BIG-IP LTM nodes* + * bigip_pool_member *-- manages F5 BIG-IP LTM pool members* + * openvswitch_port + * openvswitch_bridge Plugins: @@ -1223,26 +1269,35 @@ Highlighted new features: New modules: -* notifications: datadog_event -- send data to datadog -* cloud: digital_ocean -- module for DigitalOcean provisioning that also includes inventory support -* cloud: rds -- Amazon Relational Database Service -* cloud: linode -- modules for Linode provisioning that also includes inventory support -* cloud: route53 -- manage Amazon DNS entries -* cloud: ec2_ami -- manages (and creates!) 
ec2 AMIs -* database: mysql_replication -- manages mysql replication settings for masters/slaves -* database: mysql_variables -- manages mysql runtime variables -* database: redis -- manages redis databases (slave mode and flushing data) -* net_infrastructure: arista_interface -* net_infrastructure: arista_lag -* net_infrastructure: arista_l2interface -* net_infrastructure: arista_vlan -* system: stat -- reports on stat(istics) of remote files, for use with 'register' -* web_infrastructure: htpasswd -- manipulate htpasswd files -* packaging: rpm_key -- adds or removes RPM signing keys -* packaging: apt_repository -- rewritten to remove dependencies -* monitoring: boundary_meter -- adds or removes boundary.com meters -* net_infrastructure: dnsmadeeasy - manipulate DNS Made Easy records -* files: xattr -- manages extended attributes on files +- notifications + * datadog_event *-- send data to datadog* +- cloud + * digital_ocean *-- module for DigitalOcean provisioning that also includes inventory support* + * rds *-- Amazon Relational Database Service* + * linode *-- modules for Linode provisioning that also includes inventory support* + * route53 *-- manage Amazon DNS entries* + * ec2_ami *-- manages (and creates!) ec2 AMIs* +- database + * mysql_replication *-- manages mysql replication settings for masters/slaves* + * mysql_variables *-- manages mysql runtime variables* + * redis *-- manages redis databases (slave mode and flushing data)* +- net_infrastructure + * arista_interface + * arista_l2interface + * arista_lag + * arista_vlan + * dnsmadeeasy *-- manipulate DNS Made Easy records* +- system + * stat *-- reports on stat(istics) of remote files, for use with 'register'* +- web_infrastructure + * htpasswd *-- manipulate htpasswd files* +- packaging + * apt_repository *-- rewritten to remove dependencies* + * rpm_key *-- adds or removes RPM signing keys* +- monitoring + * boundary_meter *-- adds or removes boundary.com meters* +- files + * xattr *-- manages extended attributes on files* Misc changes: @@ -1396,40 +1451,48 @@ increasing the ease at which things can be reorganized. 
Modules added: -* cloud: rax: module for creating instances in the rackspace cloud (uses pyrax) -* packages: npm: node.js package management -* packages: pkgng: next-gen package manager for FreeBSD -* packages: redhat_subscription: manage Red Hat subscription usage -* packages: rhn_register: basic RHN registration -* packages: zypper (SuSE) -* database: postgresql_priv: manages postgresql privileges -* networking: bigip_pool: load balancing with F5s -* networking: ec2_elb: add and remove machines from ec2 elastic load balancers -* notification: hipchat: send notification events to hipchat -* notification: flowdock: send messages to flowdock during playbook runs -* notification: campfire: send messages to campfire during playbook runs -* notification: mqtt: send messages to the Mosquitto message bus -* notification: irc: send messages to IRC channels -* notification: filesystem - a wrapper around mkfs -* notification: jabber: send jabber chat messages -* notification: osx_say: make OS X say things out loud -* openstack: keystone_user -* openstack: glance_image -* openstack: nova_compute -* openstack: nova_keypair -* openstack: quantum_floating_ip -* openstack: quantum_floating_ip_associate -* openstack: quantum_network -* openstack: quantum_router -* openstack: quantum_router_gateway -* openstack: quantum_router_interface -* openstack: quantum_subnet -* monitoring: newrelic_deployment: notifies newrelic of new deployments -* monitoring: airbrake_deployment - notify airbrake of new deployments -* monitoring: pingdom -* monitoring: pagerduty -* monitoring: monit -* utility: set_fact: sets a variable, which can be the result of a template evaluation +- cloud + * rax *-- module for creating instances in the rackspace cloud (uses pyrax)* +- packages + * npm *-- node.js package management* + * pkgng *-- next-gen package manager for FreeBSD* + * redhat_subscription *-- manage Red Hat subscription usage* + * rhn_register *-- basic RHN registration* + * zypper *(SuSE)* +- database + * postgresql_priv *-- manages postgresql privileges* +- networking + * bigip_pool *-- load balancing with F5s* + * ec2_elb *-- add and remove machines from ec2 elastic load balancers* +- notification + * hipchat *-- send notification events to hipchat* + * flowdock *-- send messages to flowdock during playbook runs* + * campfire *-- send messages to campfire during playbook runs* + * mqtt *-- send messages to the Mosquitto message bus* + * irc *-- send messages to IRC channels* + * filesystem *-- a wrapper around mkfs* + * jabber *-- send jabber chat messages* + * osx_say *-- make OS X say things out loud* +- openstack + * glance_image + * nova_compute + * nova_keypair + * keystone_user + * quantum_floating_ip + * quantum_floating_ip_associate + * quantum_network + * quantum_router + * quantum_router_gateway + * quantum_router_interface + * quantum_subnet +- monitoring + * airbrake_deployment *-- notify airbrake of new deployments* + * monit + * newrelic_deployment *-- notifies newrelic of new deployments* + * pagerduty + * pingdom +- utility + * set_fact *-- sets a variable, which can be the result of a template evaluation* Modules removed @@ -1522,26 +1585,26 @@ Core Features Modules Added: -* bzr (bazaar version control) +* bzr *(bazaar version control)* * cloudformation * django-manage -* gem (ruby gems) +* gem *(ruby gems)* * homebrew -* lvg (logical volume groups) -* lvol (LVM logical volumes) +* lvg *(logical volume groups)* +* lvol *(LVM logical volumes)* * macports * mongodb_user * netscaler * okg * openbsd_pkg 
+* rabbit_mq_parameter * rabbit_mq_plugin * rabbit_mq_user * rabbit_mq_vhost -* rabbit_mq_parameter * rhn_channel -* s3 -- allows putting file contents in buckets for sharing over s3 -* uri module -- can get/put/post/etc -* vagrant -- launching VMs with vagrant, this is different from existing vagrant plugin +* s3 *-- allows putting file contents in buckets for sharing over s3* +* uri module *-- can get/put/post/etc* +* vagrant *-- launching VMs with vagrant, this is different from existing vagrant plugin* * zfs Bugfixes and Misc Changes: @@ -1643,12 +1706,12 @@ Plugins: New modules: -* new sysctl module -* new pacman module (Arch linux) -* new apt_key module -* hg module now in core -* new ec2_facts module -* added pkgin module for Joyent SmartOS +* apt_key +* ec2_facts +* hg *(now in core)* +* pacman *(Arch linux)* +* pkgin *(Joyent SmartOS)* +* sysctl New config settings: @@ -2135,4 +2198,3 @@ in kickstarts ## 0.0.2 and 0.0.1 * Initial stages of project - From a44b88936ba89fe05237ddf130bb69c05df1bdd0 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 11 Mar 2016 16:55:14 -0800 Subject: [PATCH 0953/1113] Add unicode dir to connection tests. --- test/integration/test_connection.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/integration/test_connection.yml b/test/integration/test_connection.yml index 5b3d4f1bc3c..27f24aafd6b 100644 --- a/test/integration/test_connection.yml +++ b/test/integration/test_connection.yml @@ -14,18 +14,20 @@ ### copy local file with unicode filename and content - name: create local file with unicode filename and content - local_action: lineinfile dest=/tmp/ansible-local-汉语 create=true line=汉语 + local_action: lineinfile dest=/tmp/ansible-local-汉语/汉语.txt create=true line=汉语 - name: remove remote file with unicode filename and content - file: path=/tmp/ansible-remote-汉语 state=absent + file: path=/tmp/ansible-remote-汉语/汉语.txt state=absent + - name: create remote directory with unicode name + file: path=/tmp/ansible-remote-汉语 state=directory - name: copy local file with unicode filename and content - copy: src=/tmp/ansible-local-汉语 dest=/tmp/ansible-remote-汉语 + copy: src=/tmp/ansible-local-汉语/汉语.txt dest=/tmp/ansible-remote-汉语/汉语.txt ### fetch remote file with unicode filename and content - name: remove local file with unicode filename and content - local_action: file path=/tmp/ansible-local-汉语 state=absent + local_action: file path=/tmp/ansible-local-汉语/汉语.txt state=absent - name: fetch remote file with unicode filename and content - fetch: src=/tmp/ansible-remote-汉语 dest=/tmp/ansible-local-汉语 fail_on_missing=true validate_checksum=true flat=true + fetch: src=/tmp/ansible-remote-汉语/汉语.txt dest=/tmp/ansible-local-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true ### remove local and remote temp files From 1e1852c34d25fd083013818097ad2d7b64f7dd7e Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 11 Mar 2016 16:55:51 -0800 Subject: [PATCH 0954/1113] Add missing to_bytes on directory path. 
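Without this conversion, makedirs_safe() can fail on Python 2 when handed a text (unicode) path containing non-ASCII characters, such as the /tmp/ansible-local-汉语 directory used by the connection test in the previous commit: os.path.exists() then implicitly encodes the path and may raise UnicodeEncodeError depending on the filesystem encoding. The sketch below is only an illustration of the idea (the real change just wraps the existence check and uses ansible.utils.unicode.to_bytes; the helper here is a simplified stand-in):

    # -*- coding: utf-8 -*-
    # Sketch only: encode a possibly-unicode path to bytes before os.* calls
    # so non-ASCII directory names do not hit an implicit ASCII encode.
    import os
    from errno import EEXIST


    def to_bytes(obj, encoding='utf-8', errors='strict'):
        # simplified stand-in for ansible.utils.unicode.to_bytes
        if isinstance(obj, bytes):
            return obj
        return obj.encode(encoding, errors)


    def makedirs_safe(path, mode=None):
        '''Create a directory tree, tolerating races between workers.'''
        b_path = to_bytes(path, errors='strict')
        if not os.path.exists(b_path):
            try:
                if mode:
                    os.makedirs(b_path, mode)
                else:
                    os.makedirs(b_path)
            except OSError as e:
                # another worker may have created it between the check and here
                if e.errno != EEXIST:
                    raise


    makedirs_safe(u'/tmp/ansible-local-汉语')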
--- lib/ansible/utils/path.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py index d8dc4234265..1fe62e85940 100644 --- a/lib/ansible/utils/path.py +++ b/lib/ansible/utils/path.py @@ -19,6 +19,7 @@ __metaclass__ = type import os from errno import EEXIST +from ansible.utils.unicode import to_bytes __all__ = ['unfrackpath'] @@ -33,7 +34,7 @@ def unfrackpath(path): def makedirs_safe(path, mode=None): '''Safe way to create dirs in muliprocess/thread environments''' - if not os.path.exists(path): + if not os.path.exists(to_bytes(path, errors='strict')): try: if mode: os.makedirs(path, mode) From e3671a8a83dacf5ca3025c6f19186b502523df17 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Fri, 11 Mar 2016 22:14:11 -0500 Subject: [PATCH 0955/1113] bugfix for eos shared module and new optimization This commit address to issues in the eos shard module. The first one is a bug fix for returning the running config when the transport is eapi. The shared module will now return config text instead of an object. The second is a optimization that delays when the eos module connects to the remote devices. This provies a performance enhancement when using ssh since the module doesn't default to connecting immediately --- lib/ansible/module_utils/eos.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index 71fa8802b66..41dfbaeadd3 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # + NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I) NET_COMMON_ARGS = dict( @@ -27,7 +28,7 @@ NET_COMMON_ARGS = dict( auth_pass=dict(no_log=True), transport=dict(choices=['cli', 'eapi']), use_ssl=dict(default=True, type='bool'), - provider=dict() + provider=dict(type='dict') ) def to_list(val): @@ -144,6 +145,11 @@ class NetworkModule(AnsibleModule): super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None + self._connected = False + + @property + def connected(self): + return self._connected @property def config(self): @@ -168,7 +174,7 @@ class NetworkModule(AnsibleModule): try: self.connection.connect() - self.execute('terminal length 0') + self.connection.send('terminal length 0') if self.params['authorize']: self.connection.authorize() @@ -176,12 +182,13 @@ class NetworkModule(AnsibleModule): except Exception, exc: self.fail_json(msg=exc.message) + self._connected = True + def configure(self, commands): commands = to_list(commands) commands.insert(0, 'configure terminal') responses = self.execute(commands) responses.pop(0) - return responses def config_replace(self, commands): @@ -195,6 +202,8 @@ class NetworkModule(AnsibleModule): def execute(self, commands, **kwargs): try: + if not self.connected: + self.connect() return self.connection.send(commands, **kwargs) except Exception, exc: self.fail_json(msg=exc.message, commands=commands) @@ -213,7 +222,7 @@ class NetworkModule(AnsibleModule): return self.execute(cmd)[0] else: resp = self.execute(cmd, encoding='text') - return resp[0] + return resp[0]['output'] def get_module(**kwargs): @@ -230,7 +239,5 @@ def get_module(**kwargs): if module.params['transport'] == 'cli' and not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to 
be installed') - module.connect() - return module From 12228301556546578f9cf161191ffed806cd8c73 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 03:19:47 -0400 Subject: [PATCH 0956/1113] Initial commit for Ubuntu 12.04 docker config for testing --- test/utils/docker/ubuntu1204/Dockerfile | 61 +++++++++++++++++++++ test/utils/docker/ubuntu1204/init-fake.conf | 13 +++++ 2 files changed, 74 insertions(+) create mode 100644 test/utils/docker/ubuntu1204/Dockerfile create mode 100644 test/utils/docker/ubuntu1204/init-fake.conf diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile new file mode 100644 index 00000000000..289837c3f0e --- /dev/null +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -0,0 +1,61 @@ +FROM ubuntu:precise +RUN apt-get clean; apt-get update -y; +RUN apt-get install -y \ + debianutils \ + gawk \ + git \ + locales \ + make \ + mercurial \ + ruby \ + subversion \ + sudo \ + unzip + +# helpful things taken from the ubuntu-upstart Dockerfile: +# https://github.com/tianon/dockerfiles/blob/4d24a12b54b75b3e0904d8a285900d88d3326361/sbin-init/ubuntu/upstart/14.04/Dockerfile +ADD init-fake.conf /etc/init/fake-container-events.conf + +# undo some leet hax of the base image +RUN rm /usr/sbin/policy-rc.d; \ + rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl +# remove some pointless services +RUN /usr/sbin/update-rc.d -f ondemand remove; \ + for f in \ + /etc/init/u*.conf \ + /etc/init/mounted-dev.conf \ + /etc/init/mounted-proc.conf \ + /etc/init/mounted-run.conf \ + /etc/init/mounted-tmp.conf \ + /etc/init/mounted-var.conf \ + /etc/init/hostname.conf \ + /etc/init/networking.conf \ + /etc/init/tty*.conf \ + /etc/init/plymouth*.conf \ + /etc/init/hwclock*.conf \ + /etc/init/module*.conf\ + ; do \ + dpkg-divert --local --rename --add "$f"; \ + done; \ + echo '# /lib/init/fstab: cleared out for bare-bones Docker' > /lib/init/fstab +# end things from ubuntu-upstart Dockerfile + +RUN apt-get install -y \ + python-coverage \ + python-httplib2 \ + python-jinja2 \ + python-keyczar \ + python-mock \ + python-nose \ + python-paramiko \ + python-pip \ + python-setuptools \ + python-virtualenv \ + python-yaml +RUN rm /etc/apt/apt.conf.d/docker-clean +RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers +RUN mkdir /etc/ansible/ +RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts +RUN locale-gen en_US.UTF-8 +ENV container docker +CMD ["/sbin/init"] diff --git a/test/utils/docker/ubuntu1204/init-fake.conf b/test/utils/docker/ubuntu1204/init-fake.conf new file mode 100644 index 00000000000..f5db965051e --- /dev/null +++ b/test/utils/docker/ubuntu1204/init-fake.conf @@ -0,0 +1,13 @@ +# fake some events needed for correct startup other services + +description "In-Container Upstart Fake Events" + +start on startup + +script + rm -rf /var/run/*.pid + rm -rf /var/run/network/* + /sbin/initctl emit stopped JOB=udevtrigger --no-wait + /sbin/initctl emit started JOB=udev --no-wait + /sbin/initctl emit runlevel RUNLEVEL=3 --no-wait +end script From 89cace28ea810bde13cbb1583e1b6f54db6f87ef Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 03:30:55 -0400 Subject: [PATCH 0957/1113] Adding ubuntu1204 to the travis config for testing --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index f90bf997b5b..ff98c4c34a2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,7 @@ matrix: - env: 
TARGET=centos7 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=fedora23 TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" - env: TARGET=fedora-rawhide TARGET_OPTIONS="--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" + - env: TARGET=ubuntu1204 - env: TARGET=ubuntu1404 addons: apt: From d7f1d865ec190a394cd1bf717cc1344685763b07 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sun, 13 Mar 2016 09:50:52 +0100 Subject: [PATCH 0958/1113] Fix pygments lexer name This prevent the build from sending warnings like this: YAMLSyntax.rst:28: WARNING: Pygments lexer name 'YAML' is not known and actually show real warnings and issues in the documentation --- docsite/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/conf.py b/docsite/conf.py index 95bc1fb8328..b300813b255 100644 --- a/docsite/conf.py +++ b/docsite/conf.py @@ -100,7 +100,7 @@ exclude_patterns = ['modules'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' -highlight_language = 'YAML' +highlight_language = 'yaml' # Options for HTML output From 8b808cab9efe64fc55436eebbb14a270d4683c43 Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sun, 13 Mar 2016 09:49:59 +0100 Subject: [PATCH 0959/1113] Add doc on ansible_version, fix #13204 --- docsite/rst/playbooks_variables.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index e0fb548d3b7..ae839663d4e 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -495,6 +495,24 @@ Here is an example of what that might look like:: In this pattern however, you could also write a fact module as well, and may wish to consider this as an option. +.. _ansible_version: + +Ansible version +``````````````` + +.. versionadded:: 2.0 + +To adapt playbook behavior to specific version of ansible, a variable ansible_version is available, with the following +structure:: + + "ansible_version": { + "full": "2.0.0.2", + "major": 2, + "minor": 0, + "revision": 0, + "string": "2.0.0.2" + } + .. _fact_caching: Fact Caching From b3d600f1ec61399db7415676e5d3e0f097b7af0b Mon Sep 17 00:00:00 2001 From: Michael Scherer <misc@zarb.org> Date: Sun, 13 Mar 2016 10:03:57 +0100 Subject: [PATCH 0960/1113] Fix the lexer used, jinja2 is not valid, jinja is --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index ab8e5bc201c..15ebbc15f92 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -177,7 +177,7 @@ How do I loop over a list of hosts in a group, inside of a template? A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration file with a list of servers. To do this, you can just access the "$groups" dictionary in your template, like this: -.. code-block:: jinja2 +.. 
code-block:: jinja {% for host in groups['db_servers'] %} {{ host }} From ff5584a349a6be7b7289ea8cee275f50529a41aa Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 08:27:52 -0400 Subject: [PATCH 0961/1113] Updating jinja2 in ubuntu1204 docker image --- test/utils/docker/ubuntu1204/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index 289837c3f0e..d4952fdac2d 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -52,6 +52,7 @@ RUN apt-get install -y \ python-setuptools \ python-virtualenv \ python-yaml +RUN RUN pip install --upgrade jinja2 RUN rm /etc/apt/apt.conf.d/docker-clean RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ From af282eb979b2790dfcd3250406aad962893bd019 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 08:31:47 -0400 Subject: [PATCH 0962/1113] Fixing typo in ubuntu1204 docker config --- test/utils/docker/ubuntu1204/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index d4952fdac2d..181d0ed36a1 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -52,7 +52,7 @@ RUN apt-get install -y \ python-setuptools \ python-virtualenv \ python-yaml -RUN RUN pip install --upgrade jinja2 +RUN pip install --upgrade jinja2 RUN rm /etc/apt/apt.conf.d/docker-clean RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ From 90afc5e7d2385e82f8c8672c82d5272ebd01d0e6 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 08:49:52 -0400 Subject: [PATCH 0963/1113] Adding rubygems to the ubuntu1204 docker config [no ci] --- test/utils/docker/ubuntu1204/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index 181d0ed36a1..cd15475d48f 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -8,6 +8,7 @@ RUN apt-get install -y \ make \ mercurial \ ruby \ + rubygems \ subversion \ sudo \ unzip From 9d459386ceb9e9800a6ddda50cd4c06f21a8d920 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 09:14:15 -0400 Subject: [PATCH 0964/1113] Also adding an upgrade of pycrypto to ubuntu1204 config --- test/utils/docker/ubuntu1204/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index cd15475d48f..79542d5d224 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -53,7 +53,7 @@ RUN apt-get install -y \ python-setuptools \ python-virtualenv \ python-yaml -RUN pip install --upgrade jinja2 +RUN pip install --upgrade jinja2 pycrypto RUN rm /etc/apt/apt.conf.d/docker-clean RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ From 2fa1936ff92e45d59b3f2f9b33caa7483f73090f Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 13 Mar 2016 09:21:10 -0400 Subject: [PATCH 0965/1113] Adding python-dev to the list of packages for ubuntu1204 docker [ci skip] --- test/utils/docker/ubuntu1204/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git 
a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index 79542d5d224..347f9116136 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -43,6 +43,7 @@ RUN /usr/sbin/update-rc.d -f ondemand remove; \ RUN apt-get install -y \ python-coverage \ + python-dev \ python-httplib2 \ python-jinja2 \ python-keyczar \ From 249caac0d30c21e47d756c4484283ca8e9f90c12 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 13 Mar 2016 16:01:58 -0700 Subject: [PATCH 0966/1113] feature to allow prompts to be configured at instatiation This commit adds a new feature to allow implementations of shell to specify the command prompt regexp to be used. It allows adds a new kwarg at instantiation to kick the remote device with a carriage return. By default the kickstart flag is true but can be disabled by passing kickstart=False. --- lib/ansible/module_utils/shell.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py index 2e49f279b5b..ceb7490e6e4 100644 --- a/lib/ansible/module_utils/shell.py +++ b/lib/ansible/module_utils/shell.py @@ -79,17 +79,15 @@ class Command(object): class Shell(object): - def __init__(self): + def __init__(self, prompts_re=None, errors_re=None, kickstart=True): self.ssh = None self.shell = None + self.kickstart = kickstart self._matched_prompt = None - self.prompts = list() - self.prompts.extend(CLI_PROMPTS_RE) - - self.errors = list() - self.errors.extend(CLI_ERRORS_RE) + self.prompts = prompts_re or CLI_PROMPTS_RE + self.errors = errors_re or CLI_ERRORS_RE def open(self, host, port=22, username=None, password=None, timeout=10, key_filename=None, pkey=None, look_for_keys=None, @@ -109,7 +107,10 @@ class Shell(object): self.shell = self.ssh.invoke_shell() self.shell.settimeout(10) - self.shell.sendall("\n") + + if self.kickstart: + self.shell.sendall("\n") + self.receive() def strip(self, data): From 5338d6af286f7cc108e3e80565b939fdd0a7ee79 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 13 Mar 2016 16:04:56 -0700 Subject: [PATCH 0967/1113] feature in ios to tell shell not to kickstart This commit is necessary to tell shell not to kickstart the cli session as it causes problems in IOS to recognize the prompt. 
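Taken together with the previous commit, these two changes let a network shared module hand Shell its own prompt and error patterns and skip the initial carriage return. The snippet below is only an illustrative sketch of that call pattern (it assumes the Ansible source tree and paramiko are importable; the regexps, host and credentials are made-up placeholders, not values taken from the ios or eos modules):

    # Sketch only: module-specific prompts/errors, no kickstart newline.
    import re

    from ansible.module_utils.shell import Shell

    # hypothetical patterns for the example; real modules define their own
    PROMPTS_RE = [re.compile(r"[\r\n]?[\w\-\.]+(?:\([^\)]+\))?[>#] ?$")]
    ERRORS_RE = [re.compile(r"% ?Error"), re.compile(r"invalid input", re.I)]

    shell = Shell(prompts_re=PROMPTS_RE, errors_re=ERRORS_RE, kickstart=False)
    # shell.open('switch01.example.com', port=22, username='admin',
    #            password='secret')

Leaving kickstart at its default of True keeps the old behaviour of sending a newline as soon as the channel is opened.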
--- lib/ansible/module_utils/ios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index 00d746f0e0f..f8c1e256bd4 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -52,7 +52,7 @@ class Cli(object): username = self.module.params['username'] password = self.module.params['password'] - self.shell = Shell() + self.shell = Shell(kickstart=False) try: self.shell.open(host, port=port, username=username, password=password) From d211b4c9623474284540d880f703c03cf3a27444 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Sun, 13 Mar 2016 16:06:25 -0700 Subject: [PATCH 0968/1113] feature to implement localized cli prompts to eos This change localizes the cli prompt regexps to the eos shared module instead of the common prompts implemented in shell --- lib/ansible/module_utils/eos.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index 41dfbaeadd3..37e62a4de27 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -31,6 +31,23 @@ NET_COMMON_ARGS = dict( provider=dict(type='dict') ) +CLI_PROMPTS_RE = [ + re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), + re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$") +] + +CLI_ERRORS_RE = [ + re.compile(r"% ?Error"), + re.compile(r"^% \w+", re.M), + re.compile(r"% ?Bad secret"), + re.compile(r"invalid input", re.I), + re.compile(r"(?:incomplete|ambiguous) command", re.I), + re.compile(r"connection timed out", re.I), + re.compile(r"[^\r\n]+ not found", re.I), + re.compile(r"'[^']' +returned error code: ?\d+"), + re.compile(r"[^\r\n]\/bin\/(?:ba)?sh") +] + def to_list(val): if isinstance(val, (list, tuple)): return list(val) @@ -123,9 +140,8 @@ class Cli(object): username = self.module.params['username'] password = self.module.params['password'] - self.shell = Shell() - try: + self.shell = Shell(CLI_PROMPTS_RE, CLI_ERRORS_RE) self.shell.open(host, port=port, username=username, password=password) except Exception, exc: msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) From 81788e627dc10a96c4c43b0628f485a4ab901d7d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 14 Mar 2016 01:33:34 -0400 Subject: [PATCH 0969/1113] Adding unit tests for TaskResult --- test/units/executor/test_task_result.py | 130 ++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 test/units/executor/test_task_result.py diff --git a/test/units/executor/test_task_result.py b/test/units/executor/test_task_result.py new file mode 100644 index 00000000000..a0af67edbd1 --- /dev/null +++ b/test/units/executor/test_task_result.py @@ -0,0 +1,130 @@ +# (c) 2016, James Cammarata <jimi@sngx.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.executor.task_result import TaskResult + +class TestTaskResult(unittest.TestCase): + def test_task_result_basic(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test loading a result with a dict + tr = TaskResult(mock_host, mock_task, dict()) + + # test loading a result with a JSON string + with patch('ansible.parsing.dataloader.DataLoader.load') as p: + tr = TaskResult(mock_host, mock_task, '{}') + + def test_task_result_is_changed(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no changed in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_changed()) + + # test with changed in the result + tr = TaskResult(mock_host, mock_task, dict(changed=True)) + self.assertTrue(tr.is_changed()) + + # test with multiple results but none changed + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_changed()) + + # test with multiple results and one changed + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)])) + self.assertTrue(tr.is_changed()) + + def test_task_result_is_skipped(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no skipped in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_skipped()) + + # test with skipped in the result + tr = TaskResult(mock_host, mock_task, dict(skipped=True)) + self.assertTrue(tr.is_skipped()) + + # test with multiple results but none skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_skipped()) + + # test with multiple results and one skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)])) + self.assertFalse(tr.is_skipped()) + + # test with multiple results and all skipped + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)])) + self.assertTrue(tr.is_skipped()) + + def test_task_result_is_unreachable(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no unreachable in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_unreachable()) + + # test with unreachable in the result + tr = TaskResult(mock_host, mock_task, dict(unreachable=True)) + self.assertTrue(tr.is_unreachable()) + + # test with multiple results but none unreachable + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True])) + self.assertFalse(tr.is_unreachable()) + + # test with multiple results and one unreachable + mock_task.loop = 'foo' + tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)])) + self.assertTrue(tr.is_unreachable()) + + def test_task_result_is_failed(self): + mock_host = MagicMock() + mock_task = MagicMock() + + # test with no failed in result + tr = TaskResult(mock_host, mock_task, dict()) + self.assertFalse(tr.is_failed()) + + # test 
failed result with rc values + tr = TaskResult(mock_host, mock_task, dict(rc=0)) + self.assertFalse(tr.is_failed()) + tr = TaskResult(mock_host, mock_task, dict(rc=1)) + self.assertTrue(tr.is_failed()) + + # test with failed in result + tr = TaskResult(mock_host, mock_task, dict(failed=True)) + self.assertTrue(tr.is_failed()) + + # test with failed_when in result + tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True)) + self.assertTrue(tr.is_failed()) From 2984ffdfac845290c8a89802b6308ae5b130a36a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Mon, 14 Mar 2016 13:13:50 -0400 Subject: [PATCH 0970/1113] now item callback honors display_skipped_hosts fixes #14956 --- lib/ansible/plugins/callback/default.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e4f583db3d7..1bcb4b244db 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -206,10 +206,11 @@ class CallbackModule(CallbackBase): self._handle_warnings(result._result) def v2_playbook_item_on_skipped(self, result): - msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color=C.COLOR_SKIP) + if C.DISPLAY_SKIPPED_HOSTS: + msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) + if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: + msg += " => %s" % self._dump_results(result._result) + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_include(self, included_file): msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) From d180e0e05ff52d03aa6c758e35b4111a2cdd3ac9 Mon Sep 17 00:00:00 2001 From: Andre Keedy <andre.keedy@emc.com> Date: Mon, 14 Mar 2016 16:26:52 -0400 Subject: [PATCH 0971/1113] Bug Fix -Corrected the format in case of multiple hosts Remove empty spaces --- contrib/inventory/rackhd.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/inventory/rackhd.py b/contrib/inventory/rackhd.py index e7db24d3204..92abc4d6a67 100755 --- a/contrib/inventory/rackhd.py +++ b/contrib/inventory/rackhd.py @@ -13,8 +13,10 @@ class RackhdInventory(object): self._inventory = {} for nodeid in nodeids: self._load_inventory_data(nodeid) + inventory = {} for nodeid,info in self._inventory.iteritems(): - print(json.dumps(self._format_output(nodeid, info))) + inventory[nodeid]= (self._format_output(nodeid, info)) + print(json.dumps(inventory)) def _load_inventory_data(self, nodeid): info = {} @@ -33,10 +35,10 @@ class RackhdInventory(object): ipaddress = '' if len(node_info) > 0: ipaddress = node_info[0]['ipAddress'] - output = {nodeid:{ 'hosts':[ipaddress],'vars':{}}} + output = { 'hosts':[ipaddress],'vars':{}} for key,result in info.iteritems(): - output[nodeid]['vars'][key] = json.loads(result) - output[nodeid]['vars']['ansible_ssh_user'] = 'monorail' + output['vars'][key] = json.loads(result) + output['vars']['ansible_ssh_user'] = 'monorail' except KeyError: pass return output From 88772b60035bd4bc06629aa8cc5cf76123d447f9 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Yannig=20Perr=C3=A9?= <yannig.perre@gmail.com> Date: Sat, 12 Mar 2016 10:22:49 +0100 Subject: [PATCH 0972/1113] Add a way to restrict gathered facts in Ansible: - Using gather_subset options - By ignoring ohai/chef or facter/puppet facts --- docsite/rst/glossary.rst | 3 +- docsite/rst/intro_configuration.rst | 23 +++++++++++- examples/ansible.cfg | 15 ++++++++ lib/ansible/constants.py | 3 ++ lib/ansible/executor/play_iterator.py | 21 ++++++++++- lib/ansible/module_utils/facts.py | 29 +++++++++++++-- lib/ansible/playbook/play.py | 3 ++ test/integration/test_gathering_facts.yml | 45 +++++++++++++++++++++++ 8 files changed, 133 insertions(+), 9 deletions(-) create mode 100644 test/integration/test_gathering_facts.yml diff --git a/docsite/rst/glossary.rst b/docsite/rst/glossary.rst index d05481a621e..7c5bd5f812a 100644 --- a/docsite/rst/glossary.rst +++ b/docsite/rst/glossary.rst @@ -71,8 +71,7 @@ Facts Facts are simply things that are discovered about remote nodes. While they can be used in playbooks and templates just like variables, facts are things that are inferred, rather than set. Facts are automatically discovered by Ansible when running plays by executing the internal 'setup' module on the remote nodes. You never have to call the setup module explicitly, it just runs, but it can be disabled to save time if it is -not needed. For the convenience of users who are switching from other configuration management systems, the fact module will also pull in facts from the 'ohai' and 'facter' -tools if they are installed, which are fact libraries from Chef and Puppet, respectively. +not needed or to reduce to a subset. For the convenience of users who are switching from other configuration management systems, the fact module will also pull in facts from the 'ohai' and 'facter' tools if they are installed, which are fact libraries from Chef and Puppet, respectively. You can also ignore them and save time at runtime execution. Filter Plugin +++++++++++++ diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 0bc6fbfad7b..ddcf6736188 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -353,6 +353,25 @@ This option can be useful for those wishing to save fact gathering time. Both 's gathering = smart +.. versionadded:: 2.1 + +You can specify a subset of gathered facts using the following options: + + gather_subset = all + +:all: gather all subsets +:min: gather a very limited set of facts +:network: gather min and network facts +:hardware: gather min and hardware facts (longest facts to retrieve) +:virtual: gather min and virtual facts + +You can combine them using comma separated list (ex: min,network,virtual) + +You can also disable puppet facter or chef ohai facts collection using following options: + + ignore_ohai = True + ignore_facter = True + hash_behaviour ============== @@ -367,7 +386,7 @@ official examples repos do not use this setting:: The valid values are either 'replace' (the default) or 'merge'. -.. versionadded: '2.0' +.. versionadded:: 2.0 If you want to merge hashes without changing the global settings, use the `combine` filter described in :doc:`playbooks_filters`. @@ -585,7 +604,7 @@ The directory will be created if it does not already exist. roles_path ========== -.. versionadded: '1.4' +.. versionadded:: 1.4 The roles path indicate additional directories beyond the 'roles/' subdirectory of a playbook project to search to find Ansible roles. 
For instance, if there was a source control repository of common roles and a different repository of playbooks, you might diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 8465ccca4bb..181630f9c64 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -31,6 +31,21 @@ # explicit - do not gather by default, must say gather_facts: True #gathering = implicit +# by default retrieve all facts subsets +# all - gather all subsets +# min - gather a very limited set of facts +# network - gather min and network facts +# hardware - gather hardware facts (longest facts to retrieve) +# virtual - gather min and virtual facts +# You can combine them using comma (ex: min,network,virtual) +#gather_subset = all + +# by default run ohai +#ignore_ohai = False + +# by default run facter +#ignore_facter = False + # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 4def61e1aa0..7e91233fcae 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -156,6 +156,9 @@ DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBL DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() +DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower() +DEFAULT_IGNORE_OHAI = get_config(p, DEFAULTS, 'ignore_ohai', 'ANSIBLE_IGNORE_OHAI', False, boolean=True) +DEFAULT_IGNORE_FACTER = get_config(p, DEFAULTS, 'ignore_facter', 'ANSIBLE_IGNORE_FACTER', False, boolean=True) DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True) DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 83abb40bbc1..93321ce8ae6 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -151,11 +151,30 @@ class PlayIterator: self._play = play self._blocks = [] + # Default options to gather + gather_subset = C.DEFAULT_GATHER_SUBSET + ignore_ohai = C.DEFAULT_IGNORE_OHAI + ignore_facter = C.DEFAULT_IGNORE_FACTER + + # Retrieve subset to gather + if self._play.gather_subset is not None: + gather_subset = self._play.gather_subset + # ignore ohai + if self._play.ignore_ohai is not None: + ignore_ohai = self._play.ignore_ohai + # ignore puppet facter + if self._play.ignore_facter is not None: + ignore_facter = self._play.ignore_facter + setup_block = Block(play=self._play) setup_task = Task(block=setup_block) setup_task.action = 'setup' setup_task.tags = ['always'] - setup_task.args = {} + setup_task.args = { + 'gather_subset': gather_subset, + 'ignore_ohai' : ignore_ohai, + 'ignore_facter': ignore_facter, + } setup_task.set_loader(self._play._loader) setup_block.block = [setup_task] diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 1aa16c9feeb..d683ee2a260 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -159,6 +159,9 @@ class 
Facts(object): { 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' }, ] + # Allowed fact subset for gather_subset options + ALLOWED_FACT_SUBSET = frozenset([ 'all', 'min', 'network', 'hardware', 'virtual' ]) + def __init__(self, load_on_init=True): self.facts = {} @@ -3067,15 +3070,33 @@ def get_file_lines(path): return ret def ansible_facts(module): + # Retrieve module parameters + gather_subset = [ 'all' ] + if 'gather_subset' in module.params: + gather_subset = module.params['gather_subset'] + + # Retrieve all facts elements + if 'all' in gather_subset: + gather_subset = [ 'min', 'hardware', 'network', 'virtual' ] + + # Check subsets and forbid unallowed name + for subset in gather_subset: + if subset not in Facts.ALLOWED_FACT_SUBSET: + raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: %s" % (subset, ", ".join(Facts.ALLOWED_FACT_SUBSET))) + facts = {} facts.update(Facts().populate()) - facts.update(Hardware().populate()) - facts.update(Network(module).populate()) - facts.update(Virtual().populate()) + if 'hardware' in gather_subset: + facts.update(Hardware().populate()) + if 'network' in gather_subset: + facts.update(Network(module).populate()) + if 'virtual' in gather_subset: + facts.update(Virtual().populate()) + facts['gather_subset'] = gather_subset return facts # =========================================== - +# TODO: remove this dead code? def get_all_facts(module): setup_options = dict(module_setup=True) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index c354b745496..c001419732a 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,6 +64,9 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) + _gather_subset = FieldAttribute(isa='string', default=None, always_post_validate=True) + _ignore_facter = FieldAttribute(isa='bool', default=None, always_post_validate=True) + _ignore_ohai = FieldAttribute(isa='bool', default=None, always_post_validate=True) _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) diff --git a/test/integration/test_gathering_facts.yml b/test/integration/test_gathering_facts.yml new file mode 100644 index 00000000000..03d707fbf1a --- /dev/null +++ b/test/integration/test_gathering_facts.yml @@ -0,0 +1,45 @@ +--- + +- hosts: localhost + tags: [ 'min' ] + connection: local + gather_subset: "min" + ignore_facter: yes + ignore_ohai: yes + gather_facts: yes + tasks: + - debug: var={{item}} + with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + +- hosts: localhost + tags: [ 'network' ] + connection: local + gather_subset: "network" + ignore_facter: yes + ignore_ohai: yes + gather_facts: yes + tasks: + - debug: var={{item}} + with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + +- hosts: localhost + tags: [ 'hardware' ] + connection: local + gather_subset: "hardware" + ignore_facter: yes + ignore_ohai: yes + gather_facts: yes + tasks: + - debug: var={{item}} + with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + +- hosts: localhost + tags: [ 'virtual' ] + connection: local + gather_subset: "virtual" + ignore_facter: yes + ignore_ohai: yes + gather_facts: yes + tasks: + - debug: var={{item}} + with_items: [ 
'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] From d665911bab1e657169843e3fce99058902d213a8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Mar 2016 10:32:50 -0700 Subject: [PATCH 0973/1113] Allow FieldAttribute lists and sets to be set from a comma separated string --- lib/ansible/playbook/base.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index d3752adf2f3..c9fd2e84b45 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -334,7 +334,10 @@ class Base: if value is None: value = [] elif not isinstance(value, list): - value = [ value ] + if isinstance(value, string_types): + value = value.split(',') + else: + value = [ value ] if attribute.listof is not None: for item in value: if not isinstance(item, attribute.listof): @@ -346,11 +349,15 @@ class Base: elif attribute.isa == 'set': if value is None: value = set() - else: - if not isinstance(value, (list, set)): + elif not isinstance(value, (list, set)): + if isinstance(value, string_types): + value = value.split(',') + else: + # Making a list like this handles strings of + # text and bytes properly value = [ value ] - if not isinstance(value, set): - value = set(value) + if not isinstance(value, set): + value = set(value) elif attribute.isa == 'dict': if value is None: value = dict() From 5a1e35224b82e8f0776dfea3d34bcb005195bbdb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Mar 2016 09:45:28 -0700 Subject: [PATCH 0974/1113] Make changes proposed during review of restrict fact gathering feature: * Make documentation examples into code blocks * Make code to call the subsets more general. * Made min subset always execute (cannot disable it). * Use a passed in modules parameter rather than global modules. This is needed for ziploader * Remove unneeded __init__() * Remove uneeded multiple inheritance from a base class * gather_facts is now a list type --- docsite/rst/intro_configuration.rst | 4 +- lib/ansible/module_utils/facts.py | 312 ++++++++++++---------------- lib/ansible/playbook/play.py | 2 +- 3 files changed, 131 insertions(+), 187 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ddcf6736188..1f7c6ca3131 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -355,7 +355,7 @@ This option can be useful for those wishing to save fact gathering time. Both 's .. 
versionadded:: 2.1 -You can specify a subset of gathered facts using the following options: +You can specify a subset of gathered facts using the following options:: gather_subset = all @@ -367,7 +367,7 @@ You can specify a subset of gathered facts using the following options: You can combine them using comma separated list (ex: min,network,virtual) -You can also disable puppet facter or chef ohai facts collection using following options: +You can also disable puppet facter or chef ohai facts collection using following options:: ignore_ohai = True ignore_facter = True diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index d683ee2a260..d38ce9207f1 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -159,11 +159,9 @@ class Facts(object): { 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' }, ] - # Allowed fact subset for gather_subset options - ALLOWED_FACT_SUBSET = frozenset([ 'all', 'min', 'network', 'hardware', 'virtual' ]) - - def __init__(self, load_on_init=True): + def __init__(self, module, load_on_init=True): + self.module = module self.facts = {} if load_on_init: @@ -218,14 +216,14 @@ class Facts(object): elif self.facts['system'] == 'AIX': # Attempt to use getconf to figure out architecture # fall back to bootinfo if needed - if module.get_bin_path('getconf'): - rc, out, err = module.run_command([module.get_bin_path('getconf'), - 'MACHINE_ARCHITECTURE']) + getconf_bin = self.module.get_bin_path('getconf') + if getconf_bin: + rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE']) data = out.split('\n') self.facts['architecture'] = data[0] else: - rc, out, err = module.run_command([module.get_bin_path('bootinfo'), - '-p']) + bootinfo_bin = self.module.get_bin_path('bootinfo') + rc, out, err = self.module.run_command([bootinfo_bin, '-p']) data = out.split('\n') self.facts['architecture'] = data[0] elif self.facts['system'] == 'OpenBSD': @@ -233,7 +231,7 @@ class Facts(object): def get_local_facts(self): - fact_path = module.params.get('fact_path', None) + fact_path = self.module.params.get('fact_path', None) if not fact_path or not os.path.exists(fact_path): return @@ -246,7 +244,7 @@ class Facts(object): # try to read it as json first # if that fails read it with ConfigParser # if that fails, skip it - rc, out, err = module.run_command(fn) + rc, out, err = self.module.run_command(fn) else: out = get_file_content(fn, default='') @@ -297,20 +295,20 @@ class Facts(object): # as it's much cleaner than this massive if-else if self.facts['system'] == 'AIX': self.facts['distribution'] = 'AIX' - rc, out, err = module.run_command("/usr/bin/oslevel") + rc, out, err = self.module.run_command("/usr/bin/oslevel") data = out.split('.') self.facts['distribution_version'] = data[0] self.facts['distribution_release'] = data[1] elif self.facts['system'] == 'HP-UX': self.facts['distribution'] = 'HP-UX' - rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True) data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out) if data: self.facts['distribution_version'] = data.groups()[0] self.facts['distribution_release'] = data.groups()[1] elif self.facts['system'] == 'Darwin': self.facts['distribution'] = 'MacOSX' - rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion") + rc, out, err = 
self.module.run_command("/usr/bin/sw_vers -productVersion") data = out.split()[-1] self.facts['distribution_version'] = data elif self.facts['system'] == 'FreeBSD': @@ -324,7 +322,7 @@ class Facts(object): elif self.facts['system'] == 'OpenBSD': self.facts['distribution'] = 'OpenBSD' self.facts['distribution_release'] = platform.release() - rc, out, err = module.run_command("/sbin/sysctl -n kern.version") + rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version") match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out) if match: self.facts['distribution_version'] = match.groups()[0] @@ -414,7 +412,7 @@ class Facts(object): self.facts['distribution_release'] = ora_prefix + data break - uname_rc, uname_out, uname_err = module.run_command(['uname', '-v']) + uname_rc, uname_out, uname_err = self.module.run_command(['uname', '-v']) distribution_version = None if 'SmartOS' in data: self.facts['distribution'] = 'SmartOS' @@ -580,7 +578,7 @@ class Facts(object): # try various forms of querying pid 1 proc_1 = get_file_content('/proc/1/comm') if proc_1 is None: - rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) + rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) else: proc_1 = os.path.basename(proc_1) @@ -613,7 +611,7 @@ class Facts(object): elif self.facts['system'] == 'Linux': if self._check_systemd(): self.facts['service_mgr'] = 'systemd' - elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"): + elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"): self.facts['service_mgr'] = 'upstart' elif os.path.realpath('/sbin/rc') == '/sbin/openrc': self.facts['service_mgr'] = 'openrc' @@ -625,9 +623,9 @@ class Facts(object): self.facts['service_mgr'] = 'service' def get_lsb_facts(self): - lsb_path = module.get_bin_path('lsb_release') + lsb_path = self.module.get_bin_path('lsb_release') if lsb_path: - rc, out, err = module.run_command([lsb_path, "-a"]) + rc, out, err = self.module.run_command([lsb_path, "-a"]) if rc == 0: self.facts['lsb'] = {} for line in out.split('\n'): @@ -735,7 +733,7 @@ class Facts(object): def _check_systemd(self): # tools must be installed - if module.get_bin_path('systemctl'): + if self.module.get_bin_path('systemctl'): # this should show if systemd is the boot init system, if checking init faild to mark as systemd # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html @@ -845,9 +843,6 @@ class Hardware(Facts): subclass = sc return super(cls, subclass).__new__(subclass, *arguments, **keyword) - def __init__(self): - Facts.__init__(self) - def populate(self): return self.facts @@ -872,9 +867,6 @@ class LinuxHardware(Hardware): # Now we have all of these in a dict structure MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached')) - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1058,7 +1050,7 @@ class LinuxHardware(Hardware): else: # Fall back to using dmidecode, if available - dmi_bin = module.get_bin_path('dmidecode') + dmi_bin = self.module.get_bin_path('dmidecode') DMI_DICT = { 'bios_date': 'bios-release-date', 'bios_version': 'bios-version', @@ -1071,7 +1063,7 @@ class LinuxHardware(Hardware): } for (k, v) in DMI_DICT.items(): if dmi_bin is not None: - (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) + (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) if rc == 0: # Strip out 
commented lines (specific dmidecode output) thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) @@ -1100,9 +1092,9 @@ class LinuxHardware(Hardware): uuid = uuids[fields[0]] else: uuid = 'NA' - lsblkPath = module.get_bin_path("lsblk") + lsblkPath = self.module.get_bin_path("lsblk") if lsblkPath: - rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) + rc, out, err = self.module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) if rc == 0: uuid = out.strip() @@ -1121,9 +1113,9 @@ class LinuxHardware(Hardware): def get_device_facts(self): self.facts['devices'] = {} - lspci = module.get_bin_path('lspci') + lspci = self.module.get_bin_path('lspci') if lspci: - rc, pcidata, err = module.run_command([lspci, '-D']) + rc, pcidata, err = self.module.run_command([lspci, '-D']) else: pcidata = None @@ -1176,7 +1168,7 @@ class LinuxHardware(Hardware): part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512) - part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize']))) + part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize']))) d['partitions'][partname] = part d['rotational'] = get_file_content(sysdir + "/queue/rotational") @@ -1193,7 +1185,7 @@ class LinuxHardware(Hardware): d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size") if not d['sectorsize']: d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512) - d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize'])) + d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize'])) d['host'] = "" @@ -1228,14 +1220,14 @@ class LinuxHardware(Hardware): def get_lvm_facts(self): """ Get LVM Facts if running as root and lvm utils are available """ - if os.getuid() == 0 and module.get_bin_path('vgs'): + if os.getuid() == 0 and self.module.get_bin_path('vgs'): lvm_util_options = '--noheadings --nosuffix --units g' - vgs_path = module.get_bin_path('vgs') + vgs_path = self.module.get_bin_path('vgs') #vgs fields: VG #PV #LV #SN Attr VSize VFree vgs={} if vgs_path: - rc, vg_lines, err = module.run_command( '%s %s' % (vgs_path, lvm_util_options)) + rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options)) for vg_line in vg_lines.splitlines(): items = vg_line.split() vgs[items[0]] = {'size_g':items[-2], @@ -1243,12 +1235,12 @@ class LinuxHardware(Hardware): 'num_lvs': items[2], 'num_pvs': items[1]} - lvs_path = module.get_bin_path('lvs') + lvs_path = self.module.get_bin_path('lvs') #lvs fields: #LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert lvs = {} if lvs_path: - rc, lv_lines, err = module.run_command( '%s %s' % (lvs_path, lvm_util_options)) + rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options)) for lv_line in lv_lines.splitlines(): items = lv_line.split() lvs[items[0]] = {'size_g': items[3], 'vg': items[1]} @@ -1263,9 +1255,6 @@ class SunOSHardware(Hardware): """ platform = 'SunOS' - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1278,7 +1267,7 @@ class SunOSHardware(Hardware): def get_cpu_facts(self): physid = 0 sockets = {} - rc, out, err = module.run_command("/usr/bin/kstat cpu_info") + rc, out, err = 
self.module.run_command("/usr/bin/kstat cpu_info") self.facts['processor'] = [] for line in out.split('\n'): if len(line) < 1: @@ -1319,11 +1308,11 @@ class SunOSHardware(Hardware): self.facts['processor_count'] = len(self.facts['processor']) def get_memory_facts(self): - rc, out, err = module.run_command(["/usr/sbin/prtconf"]) + rc, out, err = self.module.run_command(["/usr/sbin/prtconf"]) for line in out.split('\n'): if 'Memory size' in line: self.facts['memtotal_mb'] = line.split()[2] - rc, out, err = module.run_command("/usr/sbin/swap -s") + rc, out, err = self.module.run_command("/usr/sbin/swap -s") allocated = long(out.split()[1][:-1]) reserved = long(out.split()[5][:-1]) used = long(out.split()[8][:-1]) @@ -1362,9 +1351,6 @@ class OpenBSDHardware(Hardware): platform = 'OpenBSD' DMESG_BOOT = '/var/run/dmesg.boot' - def __init__(self): - Hardware.__init__(self) - def populate(self): self.sysctl = self.get_sysctl() self.get_memory_facts() @@ -1374,7 +1360,7 @@ class OpenBSDHardware(Hardware): return self.facts def get_sysctl(self): - rc, out, err = module.run_command(["/sbin/sysctl", "hw"]) + rc, out, err = self.module.run_command(["/sbin/sysctl", "hw"]) if rc != 0: return dict() sysctl = dict() @@ -1403,7 +1389,7 @@ class OpenBSDHardware(Hardware): # procs memory page disks traps cpu # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99 - rc, out, err = module.run_command("/usr/bin/vmstat") + rc, out, err = self.module.run_command("/usr/bin/vmstat") if rc == 0: self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024 self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024 @@ -1412,7 +1398,7 @@ class OpenBSDHardware(Hardware): # total: 69268 1K-blocks allocated, 0 used, 69268 available # And for older OpenBSD: # total: 69268k bytes allocated = 0k used, 69268k available - rc, out, err = module.run_command("/sbin/swapctl -sk") + rc, out, err = self.module.run_command("/sbin/swapctl -sk") if rc == 0: swaptrans = maketrans(' ', ' ') data = out.split() @@ -1423,7 +1409,7 @@ class OpenBSDHardware(Hardware): processor = [] dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT) if not dmesg_boot: - rc, dmesg_boot, err = module.run_command("/sbin/dmesg") + rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg") i = 0 for line in dmesg_boot.splitlines(): if line.split(' ', 1)[0] == 'cpu%i:' % i: @@ -1455,9 +1441,6 @@ class FreeBSDHardware(Hardware): platform = 'FreeBSD' DMESG_BOOT = '/var/run/dmesg.boot' - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1471,12 +1454,12 @@ class FreeBSDHardware(Hardware): def get_cpu_facts(self): self.facts['processor'] = [] - rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu") + rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu") self.facts['processor_count'] = out.strip() dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT) if not dmesg_boot: - rc, dmesg_boot, err = module.run_command("/sbin/dmesg") + rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg") for line in dmesg_boot.split('\n'): if 'CPU:' in line: cpu = re.sub(r'CPU:\s+', r"", line) @@ -1486,7 +1469,7 @@ class FreeBSDHardware(Hardware): def get_memory_facts(self): - rc, out, err = module.run_command("/sbin/sysctl vm.stats") + rc, out, err = self.module.run_command("/sbin/sysctl vm.stats") for line in out.split('\n'): data = line.split() if 'vm.stats.vm.v_page_size' in line: @@ -1501,7 +1484,7 @@ 
class FreeBSDHardware(Hardware): # Device 1M-blocks Used Avail Capacity # /dev/ada0p3 314368 0 314368 0% # - rc, out, err = module.run_command("/usr/sbin/swapinfo -k") + rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k") lines = out.split('\n') if len(lines[-1]) == 0: lines.pop() @@ -1525,7 +1508,7 @@ class FreeBSDHardware(Hardware): def get_device_facts(self): sysdir = '/dev' self.facts['devices'] = {} - drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks") + drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks") slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)') if os.path.isdir(sysdir): dirlist = sorted(os.listdir(sysdir)) @@ -1543,7 +1526,7 @@ class FreeBSDHardware(Hardware): Use dmidecode executable if available''' # Fall back to using dmidecode, if available - dmi_bin = module.get_bin_path('dmidecode') + dmi_bin = self.module.get_bin_path('dmidecode') DMI_DICT = dict( bios_date='bios-release-date', bios_version='bios-version', @@ -1556,7 +1539,7 @@ class FreeBSDHardware(Hardware): ) for (k, v) in DMI_DICT.items(): if dmi_bin is not None: - (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v)) + (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v)) if rc == 0: # Strip out commented lines (specific dmidecode output) self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ]) @@ -1587,9 +1570,6 @@ class NetBSDHardware(Hardware): platform = 'NetBSD' MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'] - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1665,9 +1645,6 @@ class AIX(Hardware): """ platform = 'AIX' - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1678,7 +1655,7 @@ class AIX(Hardware): self.facts['processor'] = [] - rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor") + rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor") if out: i = 0 for line in out.split('\n'): @@ -1691,19 +1668,19 @@ class AIX(Hardware): i += 1 self.facts['processor_count'] = int(i) - rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type") + rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type") data = out.split(' ') self.facts['processor'] = data[1] - rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads") + rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads") data = out.split(' ') self.facts['processor_cores'] = int(data[1]) def get_memory_facts(self): pagesize = 4096 - rc, out, err = module.run_command("/usr/bin/vmstat -v") + rc, out, err = self.module.run_command("/usr/bin/vmstat -v") for line in out.split('\n'): data = line.split() if 'memory pages' in line: @@ -1716,7 +1693,7 @@ class AIX(Hardware): # Device 1M-blocks Used Avail Capacity # /dev/ada0p3 314368 0 314368 0% # - rc, out, err = module.run_command("/usr/sbin/lsps -s") + rc, out, err = self.module.run_command("/usr/sbin/lsps -s") if out: lines = out.split('\n') data = lines[1].split() @@ -1726,7 +1703,7 @@ class AIX(Hardware): self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100) def get_dmi_facts(self): - rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion") + rc, out, err = 
self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion") data = out.split() self.facts['firmware_version'] = data[1].strip('IBM,') @@ -1746,9 +1723,6 @@ class HPUX(Hardware): platform = 'HP-UX' - def __init__(self): - Hardware.__init__(self) - def populate(self): self.get_cpu_facts() self.get_memory_facts() @@ -1757,31 +1731,31 @@ class HPUX(Hardware): def get_cpu_facts(self): if self.facts['architecture'] == '9000/800': - rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) + rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip()) #Working with machinfo mess elif self.facts['architecture'] == 'ia64': if self.facts['distribution_version'] == "B.11.23": - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split('=')[1]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True) self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip() - rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) + rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True) self.facts['processor_cores'] = int(out.strip()) if self.facts['distribution_version'] == "B.11.31": #if machinfo return cores strings release B.11.31 > 1204 - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True) if out.strip()== '0': - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split(" ")[0]) #If hyperthreading is active divide cores by 2 - rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True) data = re.sub(' +',' ',out).strip().split(' ') if len(data) == 1: hyperthreading = 'OFF' else: hyperthreading = data[1] - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True) data = out.strip().split(" ") if hyperthreading == 'ON': self.facts['processor_cores'] = int(data[0])/2 @@ -1790,54 +1764,54 @@ class HPUX(Hardware): self.facts['processor_cores'] = self.facts['processor_count'] else: self.facts['processor_cores'] = int(data[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True) self.facts['processor'] = out.strip() else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | 
egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True) self.facts['processor_count'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True) self.facts['processor_cores'] = int(out.strip().split(" ")[0]) - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True) self.facts['processor'] = out.strip() def get_memory_facts(self): pagesize = 4096 - rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True) data = int(re.sub(' +',' ',out).split(' ')[5].strip()) self.facts['memfree_mb'] = pagesize * data / 1024 / 1024 if self.facts['architecture'] == '9000/800': try: - rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log") + rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log") data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip() self.facts['memtotal_mb'] = int(data) / 1024 except AttributeError: #For systems where memory details aren't sent to syslog or the log has rotated, use parsed #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root. if os.access("/dev/kmem", os.R_OK): - rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True) + rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True) if not err: data = out self.facts['memtotal_mb'] = int(data) / 256 else: - rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True) data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip() self.facts['memtotal_mb'] = int(data) - rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q") + rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q") self.facts['swaptotal_mb'] = int(out.strip()) - rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True) swap = 0 for line in out.strip().split('\n'): swap += int(re.sub(' +',' ',line).split(' ')[3].strip()) self.facts['swapfree_mb'] = swap def get_hw_facts(self): - rc, out, err = module.run_command("model") + rc, out, err = self.module.run_command("model") self.facts['model'] = out.strip() if self.facts['architecture'] == 'ia64': separator = ':' if self.facts['distribution_version'] == "B.11.23": separator = '=' - rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True) + rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True) self.facts['firmware_version'] = out.split(separator)[1].strip() @@ -1854,9 +1828,6 @@ class Darwin(Hardware): """ platform = 'Darwin' - def __init__(self): - Hardware.__init__(self) 
- def populate(self): self.sysctl = self.get_sysctl() self.get_mac_facts() @@ -1865,7 +1836,7 @@ class Darwin(Hardware): return self.facts def get_sysctl(self): - rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"]) + rc, out, err = self.module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"]) if rc != 0: return dict() sysctl = dict() @@ -1876,7 +1847,7 @@ class Darwin(Hardware): return sysctl def get_system_profile(self): - rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"]) + rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"]) if rc != 0: return dict() system_profile = dict() @@ -1887,7 +1858,7 @@ class Darwin(Hardware): return system_profile def get_mac_facts(self): - rc, out, err = module.run_command("sysctl hw.model") + rc, out, err = self.module.run_command("sysctl hw.model") if rc == 0: self.facts['model'] = out.splitlines()[-1].split()[1] self.facts['osversion'] = self.sysctl['kern.osversion'] @@ -1905,7 +1876,7 @@ class Darwin(Hardware): def get_memory_facts(self): self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024 - rc, out, err = module.run_command("sysctl hw.usermem") + rc, out, err = self.module.run_command("sysctl hw.usermem") if rc == 0: self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024 @@ -1936,10 +1907,6 @@ class Network(Facts): subclass = sc return super(cls, subclass).__new__(subclass, *arguments, **keyword) - def __init__(self, module): - self.module = module - Facts.__init__(self) - def populate(self): return self.facts @@ -1953,9 +1920,6 @@ class LinuxNetwork(Network): """ platform = 'Linux' - def __init__(self, module): - Network.__init__(self, module) - def populate(self): ip_path = self.module.get_bin_path('ip') if ip_path is None: @@ -1987,7 +1951,7 @@ class LinuxNetwork(Network): continue if v == 'v6' and not socket.has_ipv6: continue - rc, out, err = module.run_command(command[v]) + rc, out, err = self.module.run_command(command[v]) if not out: # v6 routing may result in # RTNETLINK answers: Invalid argument @@ -2147,7 +2111,7 @@ class LinuxNetwork(Network): if not address == '::1': ips['all_ipv6_addresses'].append(address) - ip_path = module.get_bin_path("ip") + ip_path = self.module.get_bin_path("ip") args = [ip_path, 'addr', 'show', 'primary', device] rc, stdout, stderr = self.module.run_command(args) @@ -2182,16 +2146,13 @@ class GenericBsdIfconfigNetwork(Network): """ platform = 'Generic_BSD_Ifconfig' - def __init__(self, module): - Network.__init__(self, module) - def populate(self): - ifconfig_path = module.get_bin_path('ifconfig') + ifconfig_path = self.module.get_bin_path('ifconfig') if ifconfig_path is None: return self.facts - route_path = module.get_bin_path('route') + route_path = self.module.get_bin_path('route') if route_path is None: return self.facts @@ -2230,7 +2191,7 @@ class GenericBsdIfconfigNetwork(Network): if v == 'v6' and not socket.has_ipv6: continue - rc, out, err = module.run_command(command[v]) + rc, out, err = self.module.run_command(command[v]) if not out: # v6 routing may result in # RTNETLINK answers: Invalid argument @@ -2257,7 +2218,7 @@ class GenericBsdIfconfigNetwork(Network): # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a' # when running the command 'ifconfig'. # Solaris must explicitly run the command 'ifconfig -a'. 
- rc, out, err = module.run_command([ifconfig_path, ifconfig_options]) + rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.split('\n'): @@ -2403,9 +2364,6 @@ class HPUXNetwork(Network): """ platform = 'HP-UX' - def __init__(self, module): - Network.__init__(self, module) - def populate(self): netstat_path = self.module.get_bin_path('netstat') if netstat_path is None: @@ -2418,7 +2376,7 @@ class HPUXNetwork(Network): return self.facts def get_default_interfaces(self): - rc, out, err = module.run_command("/usr/bin/netstat -nr") + rc, out, err = self.module.run_command("/usr/bin/netstat -nr") lines = out.split('\n') for line in lines: words = line.split() @@ -2429,7 +2387,7 @@ class HPUXNetwork(Network): def get_interfaces_info(self): interfaces = {} - rc, out, err = module.run_command("/usr/bin/netstat -ni") + rc, out, err = self.module.run_command("/usr/bin/netstat -ni") lines = out.split('\n') for line in lines: words = line.split() @@ -2445,7 +2403,7 @@ class HPUXNetwork(Network): 'address': address } return interfaces -class DarwinNetwork(GenericBsdIfconfigNetwork, Network): +class DarwinNetwork(GenericBsdIfconfigNetwork): """ This is the Mac OS X/Darwin Network Class. It uses the GenericBsdIfconfigNetwork unchanged @@ -2469,21 +2427,21 @@ class DarwinNetwork(GenericBsdIfconfigNetwork, Network): current_if['media_options'] = self.get_options(words[3]) -class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network): +class FreeBSDNetwork(GenericBsdIfconfigNetwork): """ This is the FreeBSD Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'FreeBSD' -class DragonFlyNetwork(GenericBsdIfconfigNetwork, Network): +class DragonFlyNetwork(GenericBsdIfconfigNetwork): """ This is the DragonFly Network Class. It uses the GenericBsdIfconfigNetwork unchanged. """ platform = 'DragonFly' -class AIXNetwork(GenericBsdIfconfigNetwork, Network): +class AIXNetwork(GenericBsdIfconfigNetwork): """ This is the AIX Network Class. It uses the GenericBsdIfconfigNetwork unchanged. 
@@ -2491,9 +2449,9 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): platform = 'AIX' def get_default_interfaces(self, route_path): - netstat_path = module.get_bin_path('netstat') + netstat_path = self.module.get_bin_path('netstat') - rc, out, err = module.run_command([netstat_path, '-nr']) + rc, out, err = self.module.run_command([netstat_path, '-nr']) interface = dict(v4 = {}, v6 = {}) @@ -2518,14 +2476,14 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): all_ipv4_addresses = [], all_ipv6_addresses = [], ) - rc, out, err = module.run_command([ifconfig_path, ifconfig_options]) + rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options]) for line in out.split('\n'): if line: words = line.split() - # only this condition differs from GenericBsdIfconfigNetwork + # only this condition differs from GenericBsdIfconfigNetwork if re.match('^\w*\d*:', line): current_if = self.parse_interface_line(words) interfaces[ current_if['device'] ] = current_if @@ -2547,16 +2505,16 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): self.parse_inet6_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) - uname_path = module.get_bin_path('uname') + uname_path = self.module.get_bin_path('uname') if uname_path: - rc, out, err = module.run_command([uname_path, '-W']) + rc, out, err = self.module.run_command([uname_path, '-W']) # don't bother with wpars it does not work # zero means not in wpar if not rc and out.split()[0] == '0': if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']): - entstat_path = module.get_bin_path('entstat') + entstat_path = self.module.get_bin_path('entstat') if entstat_path: - rc, out, err = module.run_command([entstat_path, current_if['device'] ]) + rc, out, err = self.module.run_command([entstat_path, current_if['device'] ]) if rc != 0: break for line in out.split('\n'): @@ -2571,9 +2529,9 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): current_if['type'] = 'ether' # device must have mtu attribute in ODM if 'mtu' not in current_if: - lsattr_path = module.get_bin_path('lsattr') + lsattr_path = self.module.get_bin_path('lsattr') if lsattr_path: - rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ]) + rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ]) if rc != 0: break for line in out.split('\n'): @@ -2591,7 +2549,7 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): current_if['macaddress'] = 'unknown' # will be overwritten later return current_if -class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network): +class OpenBSDNetwork(GenericBsdIfconfigNetwork): """ This is the OpenBSD Network Class. It uses the GenericBsdIfconfigNetwork. @@ -2606,7 +2564,7 @@ class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network): def parse_lladdr_line(self, words, current_if, ips): current_if['macaddress'] = words[1] -class SunOSNetwork(GenericBsdIfconfigNetwork, Network): +class SunOSNetwork(GenericBsdIfconfigNetwork): """ This is the SunOS Network Class. It uses the GenericBsdIfconfigNetwork. 
@@ -2627,7 +2585,7 @@ class SunOSNetwork(GenericBsdIfconfigNetwork, Network): all_ipv4_addresses = [], all_ipv6_addresses = [], ) - rc, out, err = module.run_command([ifconfig_path, '-a']) + rc, out, err = self.module.run_command([ifconfig_path, '-a']) for line in out.split('\n'): @@ -2711,9 +2669,6 @@ class Virtual(Facts): subclass = sc return super(cls, subclass).__new__(subclass, *arguments, **keyword) - def __init__(self): - Facts.__init__(self) - def populate(self): return self.facts @@ -2725,9 +2680,6 @@ class LinuxVirtual(Virtual): """ platform = 'Linux' - def __init__(self): - Virtual.__init__(self) - def populate(self): self.get_virtual_facts() return self.facts @@ -2844,9 +2796,9 @@ class LinuxVirtual(Virtual): self.facts['virtualization_type'] = 'powervm_lx86' elif re.match('^vendor_id.*IBM/S390', line): self.facts['virtualization_type'] = 'PR/SM' - lscpu = module.get_bin_path('lscpu') + lscpu = self.module.get_bin_path('lscpu') if lscpu: - rc, out, err = module.run_command(["lscpu"]) + rc, out, err = self.module.run_command(["lscpu"]) if rc == 0: for line in out.split("\n"): data = line.split(":", 1) @@ -2894,9 +2846,6 @@ class FreeBSDVirtual(Virtual): """ platform = 'FreeBSD' - def __init__(self): - Virtual.__init__(self) - def populate(self): self.get_virtual_facts() return self.facts @@ -2916,9 +2865,6 @@ class OpenBSDVirtual(Virtual): """ platform = 'OpenBSD' - def __init__(self): - Virtual.__init__(self) - def populate(self): self.get_virtual_facts() return self.facts @@ -2935,21 +2881,18 @@ class HPUXVirtual(Virtual): """ platform = 'HP-UX' - def __init__(self): - Virtual.__init__(self) - def populate(self): self.get_virtual_facts() return self.facts def get_virtual_facts(self): if os.path.exists('/usr/sbin/vecheck'): - rc, out, err = module.run_command("/usr/sbin/vecheck") + rc, out, err = self.module.run_command("/usr/sbin/vecheck") if rc == 0: self.facts['virtualization_type'] = 'guest' self.facts['virtualization_role'] = 'HP vPar' if os.path.exists('/opt/hpvm/bin/hpvminfo'): - rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo") + rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo") if rc == 0 and re.match('.*Running.*HPVM vPar.*', out): self.facts['virtualization_type'] = 'guest' self.facts['virtualization_role'] = 'HPVM vPar' @@ -2960,7 +2903,7 @@ class HPUXVirtual(Virtual): self.facts['virtualization_type'] = 'host' self.facts['virtualization_role'] = 'HPVM' if os.path.exists('/usr/sbin/parstatus'): - rc, out, err = module.run_command("/usr/sbin/parstatus") + rc, out, err = self.module.run_command("/usr/sbin/parstatus") if rc == 0: self.facts['virtualization_type'] = 'guest' self.facts['virtualization_role'] = 'HP nPar' @@ -2975,15 +2918,12 @@ class SunOSVirtual(Virtual): """ platform = 'SunOS' - def __init__(self): - Virtual.__init__(self) - def populate(self): self.get_virtual_facts() return self.facts def get_virtual_facts(self): - rc, out, err = module.run_command("/usr/sbin/prtdiag") + rc, out, err = self.module.run_command("/usr/sbin/prtdiag") for line in out.split('\n'): if 'VMware' in line: self.facts['virtualization_type'] = 'vmware' @@ -2999,7 +2939,7 @@ class SunOSVirtual(Virtual): self.facts['virtualization_role'] = 'guest' # Check if it's a zone if os.path.exists("/usr/bin/zonename"): - rc, out, err = module.run_command("/usr/bin/zonename") + rc, out, err = self.module.run_command("/usr/bin/zonename") if out.rstrip() != "global": self.facts['container'] = 'zone' # Check if it's a branded zone (i.e. 
Solaris 8/9 zone) @@ -3008,7 +2948,7 @@ class SunOSVirtual(Virtual): # If it's a zone check if we can detect if our global zone is itself virtualized. # Relies on the "guest tools" (e.g. vmware tools) to be installed if 'container' in self.facts and self.facts['container'] == 'zone': - rc, out, err = module.run_command("/usr/sbin/modinfo") + rc, out, err = self.module.run_command("/usr/sbin/modinfo") for line in out.split('\n'): if 'VMware' in line: self.facts['virtualization_type'] = 'vmware' @@ -3020,7 +2960,7 @@ class SunOSVirtual(Virtual): if os.path.exists("/usr/sbin/virtinfo"): # The output of virtinfo is different whether we are on a machine with logical # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first. - rc, out, err = module.run_command("/usr/sbin/virtinfo -p") + rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p") # The output contains multiple lines with different keys like this: # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false # The output may also be not formatted and the returncode is set to 0 regardless of the error condition: @@ -3071,28 +3011,24 @@ def get_file_lines(path): def ansible_facts(module): # Retrieve module parameters - gather_subset = [ 'all' ] + gather_subset = ('all',) if 'gather_subset' in module.params: gather_subset = module.params['gather_subset'] # Retrieve all facts elements if 'all' in gather_subset: - gather_subset = [ 'min', 'hardware', 'network', 'virtual' ] - - # Check subsets and forbid unallowed name - for subset in gather_subset: - if subset not in Facts.ALLOWED_FACT_SUBSET: - raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: %s" % (subset, ", ".join(Facts.ALLOWED_FACT_SUBSET))) + gather_subset = FACT_SUBSETS.keys() + else: + # Check subsets and forbid unallowed name + for subset in gather_subset: + if subset not in FACT_SUBSETS.keys(): + raise TypeError("Bad subset '%s' given to Ansible. 
gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys()))) facts = {} - facts.update(Facts().populate()) - if 'hardware' in gather_subset: - facts.update(Hardware().populate()) - if 'network' in gather_subset: - facts.update(Network(module).populate()) - if 'virtual' in gather_subset: - facts.update(Virtual().populate()) facts['gather_subset'] = gather_subset + facts.update(Facts(module).populate()) + for subset in gather_subset: + facts.update(FACT_SUBSETS[subset](module).populate()) return facts # =========================================== @@ -3153,3 +3089,11 @@ def get_all_facts(module): setup_result['_ansible_verbose_override'] = True return setup_result + +### Note: have to define this at the bottom as it references classes defined earlier in this file +# Allowed fact subset for gather_subset options and what classes they use +FACT_SUBSETS = dict( + hardware=Hardware, + network=Network, + virtual=Virtual, +) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index c001419732a..6be4c5367e6 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,7 +64,7 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _gather_subset = FieldAttribute(isa='string', default=None, always_post_validate=True) + _gather_subset = FieldAttribute(isa='list', default=None, always_post_validate=True) _ignore_facter = FieldAttribute(isa='bool', default=None, always_post_validate=True) _ignore_ohai = FieldAttribute(isa='bool', default=None, always_post_validate=True) _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) From 512825455e7aa5687a121fda4ac4f934c82171e5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Mar 2016 14:50:27 -0700 Subject: [PATCH 0975/1113] Make ohai and facter work via module_utils Fact classes rather than in the setup module --- docsite/rst/glossary.rst | 4 +- docsite/rst/intro_configuration.rst | 27 ++-- examples/ansible.cfg | 13 +- lib/ansible/constants.py | 2 - lib/ansible/executor/play_iterator.py | 10 -- lib/ansible/module_utils/facts.py | 164 ++++++++++++++-------- lib/ansible/playbook/play.py | 2 - test/integration/Makefile | 5 +- test/integration/test_gathering_facts.yml | 12 +- 9 files changed, 135 insertions(+), 104 deletions(-) diff --git a/docsite/rst/glossary.rst b/docsite/rst/glossary.rst index 7c5bd5f812a..74ff5d50e7f 100644 --- a/docsite/rst/glossary.rst +++ b/docsite/rst/glossary.rst @@ -71,7 +71,7 @@ Facts Facts are simply things that are discovered about remote nodes. While they can be used in playbooks and templates just like variables, facts are things that are inferred, rather than set. Facts are automatically discovered by Ansible when running plays by executing the internal 'setup' module on the remote nodes. You never have to call the setup module explicitly, it just runs, but it can be disabled to save time if it is -not needed or to reduce to a subset. For the convenience of users who are switching from other configuration management systems, the fact module will also pull in facts from the 'ohai' and 'facter' tools if they are installed, which are fact libraries from Chef and Puppet, respectively. You can also ignore them and save time at runtime execution. +not needed or you can tell ansible to collect only a subset of the full facts via the `gather_subset:` option. 
For the convenience of users who are switching from other configuration management systems, the fact module will also pull in facts from the 'ohai' and 'facter' tools if they are installed, which are fact libraries from Chef and Puppet, respectively. (These may also be disabled via `gather_subset:`) Filter Plugin +++++++++++++ @@ -398,7 +398,7 @@ An optional conditional statement attached to a task that is used to determine i Van Halen +++++++++ -For no particular reason, other than the fact that Michael really likes them, all Ansible releases are codenamed after Van Halen songs. There is no preference given to David Lee Roth vs. Sammy Lee Hagar-era songs, and instrumentals are also allowed. It is unlikely that there will ever be a Jump release, but a Van Halen III codename release is possible. You never know. +For no particular reason, other than the fact that Michael really likes them, all Ansible 0.x and 1.x releases are codenamed after Van Halen songs. There is no preference given to David Lee Roth vs. Sammy Lee Hagar-era songs, and instrumentals are also allowed. It is unlikely that there will ever be a Jump release, but a Van Halen III codename release is possible. You never know. Vars (Variables) ++++++++++++++++ diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 1f7c6ca3131..419eeaf5f9d 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -355,22 +355,29 @@ This option can be useful for those wishing to save fact gathering time. Both 's .. versionadded:: 2.1 -You can specify a subset of gathered facts using the following options:: +You can specify a subset of gathered facts using the following option:: gather_subset = all -:all: gather all subsets -:min: gather a very limited set of facts -:network: gather min and network facts -:hardware: gather min and hardware facts (longest facts to retrieve) -:virtual: gather min and virtual facts +:all: gather all subsets (the default) +:network: gather network facts +:hardware: gather hardware facts (longest facts to retrieve) +:virtual: gather facts about virtual machines hosted on the machine +:ohai: gather facts from ohai +:facter: gather facts from facter -You can combine them using comma separated list (ex: min,network,virtual) +You can combine them using a comma separated list (ex: network,virtual,facter) -You can also disable puppet facter or chef ohai facts collection using following options:: +You can also disable specific subsets by prepending with a `!` like this:: - ignore_ohai = True - ignore_facter = True + # Don't gather hardware facts, facts from chef's ohai or puppet's facter + gather_subset = !hardware,!ohai,!facter + +A set of basic facts are always collected no matter which additional subsets +are selected. 
If you want to collect the minimal amount of facts, use +`!all`:: + + gather_subset = !all hash_behaviour ============== diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 181630f9c64..fe60bc71aa0 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -33,19 +33,16 @@ # by default retrieve all facts subsets # all - gather all subsets -# min - gather a very limited set of facts # network - gather min and network facts # hardware - gather hardware facts (longest facts to retrieve) # virtual - gather min and virtual facts -# You can combine them using comma (ex: min,network,virtual) +# facter - import facts from facter +# ohai - import facts from ohai +# You can combine them using comma (ex: network,virtual) +# You can negate them using ! (ex: !hardware,!facter,!ohai) +# A minimal set of facts is always gathered. #gather_subset = all -# by default run ohai -#ignore_ohai = False - -# by default run facter -#ignore_facter = False - # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 7e91233fcae..365bda04d06 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -157,8 +157,6 @@ DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBL DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower() -DEFAULT_IGNORE_OHAI = get_config(p, DEFAULTS, 'ignore_ohai', 'ANSIBLE_IGNORE_OHAI', False, boolean=True) -DEFAULT_IGNORE_FACTER = get_config(p, DEFAULTS, 'ignore_facter', 'ANSIBLE_IGNORE_FACTER', False, boolean=True) DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True) DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 93321ce8ae6..e47d88ed7f4 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -153,18 +153,10 @@ class PlayIterator: # Default options to gather gather_subset = C.DEFAULT_GATHER_SUBSET - ignore_ohai = C.DEFAULT_IGNORE_OHAI - ignore_facter = C.DEFAULT_IGNORE_FACTER # Retrieve subset to gather if self._play.gather_subset is not None: gather_subset = self._play.gather_subset - # ignore ohai - if self._play.ignore_ohai is not None: - ignore_ohai = self._play.ignore_ohai - # ignore puppet facter - if self._play.ignore_facter is not None: - ignore_facter = self._play.ignore_facter setup_block = Block(play=self._play) setup_task = Task(block=setup_block) @@ -172,8 +164,6 @@ class PlayIterator: setup_task.tags = ['always'] setup_task.args = { 'gather_subset': gather_subset, - 'ignore_ohai' : ignore_ohai, - 'ignore_facter': ignore_facter, } setup_task.set_loader(self._play._loader) setup_block.block = [setup_task] diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index d38ce9207f1..50c9d4ca892 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -163,7 +163,10 @@ class Facts(object): self.module = module 
self.facts = {} - + ### TODO: Eventually, these should all get moved to populate(). But + # some of the values are currently being used by other subclasses (for + # instance, os_family and distribution). Have to sort out what to do + # about those first. if load_on_init: self.get_platform_facts() self.get_distribution_facts() @@ -2981,6 +2984,52 @@ class SunOSVirtual(Virtual): except ValueError: pass +class Ohai(Facts): + """ + This is a subclass of Facts for including information gathered from Ohai. + """ + + def populate(self): + self.run_ohai() + return self.facts + + def run_ohai(self): + ohai_path = self.module.get_bin_path('ohai') + if ohai_path is None: + return + rc, out, err = self.module.run_command(ohai_path) + try: + self.facts.update(json.loads(out)) + except: + pass + +class Facter(Facts): + """ + This is a subclass of Facts for including information gathered from Facter. + """ + def populate(self): + self.run_facter() + return self.facts + + def run_facter(self): + facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin']) + cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin']) + # Prefer to use cfacter if available + if cfacter_path is not None: + facter_path = cfacter_path + + if facter_path is None: + return + + # if facter is installed, and we can use --json because + # ruby-json is ALSO installed, include facter data in the JSON + rc, out, err = self.module.run_command(facter_path + " --puppet --json") + try: + self.facts = json.loads(out) + except: + pass + + def get_file_content(path, default=None, strip=True): data = default if os.path.exists(path) and os.access(path, os.R_OK): @@ -3009,76 +3058,70 @@ def get_file_lines(path): ret = [] return ret -def ansible_facts(module): - # Retrieve module parameters - gather_subset = ('all',) - if 'gather_subset' in module.params: - gather_subset = module.params['gather_subset'] - - # Retrieve all facts elements - if 'all' in gather_subset: - gather_subset = FACT_SUBSETS.keys() - else: - # Check subsets and forbid unallowed name - for subset in gather_subset: - if subset not in FACT_SUBSETS.keys(): - raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys()))) - +def ansible_facts(module, gather_subset): facts = {} - facts['gather_subset'] = gather_subset + facts['gather_subset'] = list(gather_subset) facts.update(Facts(module).populate()) for subset in gather_subset: facts.update(FACT_SUBSETS[subset](module).populate()) return facts -# =========================================== -# TODO: remove this dead code? def get_all_facts(module): setup_options = dict(module_setup=True) - facts = ansible_facts(module) + + # Retrieve module parameters + gather_subset = module.params['gather_subset'] + + # Retrieve all facts elements + additional_subsets = set() + exclude_subsets = set() + for subset in gather_subset: + if subset == 'all': + additional_subsets.update(VALID_SUBSETS) + continue + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + raise TypeError("Bad subset '%s' given to Ansible. 
gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys()))) + + if exclude: + exclude_subsets.add(subset) + else: + additional_subsets.add(subset) + + if not additional_subsets: + additional_subsets.update(VALID_SUBSETS) + + additional_subsets.difference_update(exclude_subsets) + + # facter and ohai are given a different prefix than other subsets + if 'facter' in additional_subsets: + additional_subsets.difference_update(('facter',)) + facter_ds = FACT_SUBSETS['facter'](module, load_on_init=False).populate() + if facter_ds: + for (k, v) in facter_ds.items(): + setup_options['facter_%s' % k.replace('-', '_')] = v + + if 'ohai' in additional_subsets: + additional_subsets.difference_update(('ohai',)) + ohai_ds = FACT_SUBSETS['ohai'](module, load_on_init=False).populate() + if ohai_ds: + for (k, v) in ohai_ds.items(): + setup_options['ohai_%s' % k.replace('-', '_')] = v + + facts = ansible_facts(module, additional_subsets) for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v - # Look for the path to the facter, cfacter, and ohai binaries and set - # the variable to that path. - - facter_path = module.get_bin_path('facter') - cfacter_path = module.get_bin_path('cfacter') - ohai_path = module.get_bin_path('ohai') - - # Prefer to use cfacter if available - if cfacter_path is not None: - facter_path = cfacter_path - # if facter is installed, and we can use --json because - # ruby-json is ALSO installed, include facter data in the JSON - - if facter_path is not None: - rc, out, err = module.run_command(facter_path + " --json") - facter = True - try: - facter_ds = json.loads(out) - except: - facter = False - if facter: - for (k,v) in facter_ds.items(): - setup_options["facter_%s" % k] = v - - # ditto for ohai - - if ohai_path is not None: - rc, out, err = module.run_command(ohai_path) - ohai = True - try: - ohai_ds = json.loads(out) - except: - ohai = False - if ohai: - for (k,v) in ohai_ds.items(): - k2 = "ohai_%s" % k.replace('-', '_') - setup_options[k2] = v - setup_result = { 'ansible_facts': {} } for (k,v) in setup_options.items(): @@ -3090,10 +3133,13 @@ def get_all_facts(module): return setup_result -### Note: have to define this at the bottom as it references classes defined earlier in this file # Allowed fact subset for gather_subset options and what classes they use +# Note: have to define this at the bottom as it references classes defined earlier in this file FACT_SUBSETS = dict( hardware=Hardware, network=Network, virtual=Virtual, + ohai=Ohai, + facter=Facter, ) +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 6be4c5367e6..b917985bdc6 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -65,8 +65,6 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) _gather_subset = FieldAttribute(isa='list', default=None, always_post_validate=True) - _ignore_facter = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _ignore_ohai = FieldAttribute(isa='bool', default=None, always_post_validate=True) _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) diff --git a/test/integration/Makefile b/test/integration/Makefile index 07b2d3c78cb..331022ce516 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -23,7 
+23,7 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) EUID := $(shell id -u -r) -all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection +all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection test_gathering_facts test_test_infra: [ "$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) | fgrep works | xargs)" = "msg: fail works (True) msg: assert works (True)" ] @@ -70,6 +70,9 @@ unicode: setup test_templating_settings: setup ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +test_gathering_facts: setup + ansible-playbook test_gathering_facts.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) -v $(TEST_FLAGS) + environment: setup ansible-playbook test_environment.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) diff --git a/test/integration/test_gathering_facts.yml b/test/integration/test_gathering_facts.yml index 03d707fbf1a..eb39c84b597 100644 --- a/test/integration/test_gathering_facts.yml +++ b/test/integration/test_gathering_facts.yml @@ -3,20 +3,16 @@ - hosts: localhost tags: [ 'min' ] connection: local - gather_subset: "min" - ignore_facter: yes - ignore_ohai: yes + gather_subset: "!all" gather_facts: yes tasks: - debug: var={{item}} - with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] - hosts: localhost tags: [ 'network' ] connection: local gather_subset: "network" - ignore_facter: yes - ignore_ohai: yes gather_facts: yes tasks: - debug: var={{item}} @@ -26,8 +22,6 @@ tags: [ 'hardware' ] connection: local gather_subset: "hardware" - ignore_facter: yes - ignore_ohai: yes gather_facts: yes tasks: - debug: var={{item}} @@ -37,8 +31,6 @@ tags: [ 'virtual' ] connection: local gather_subset: "virtual" - ignore_facter: yes - ignore_ohai: yes gather_facts: yes tasks: - debug: var={{item}} From c039ac524d2d0886d8fae67bb7e2188ee6b4a33e Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 14 Mar 2016 15:18:39 -0700 Subject: [PATCH 0976/1113] updates to nxos shared module This commit address a number of minor updates the nxos shared module * connect() is now lazy loaded * parse inner output messages when errored * code syntax cleanup --- lib/ansible/module_utils/nxos.py | 80 ++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index e18aea47739..e1d71ddc926 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -25,10 +25,11 @@ NET_COMMON_ARGS = dict( password=dict(no_log=True), transport=dict(default='cli', choices=['cli', 'nxapi']), use_ssl=dict(default=False, type='bool'), - provider=dict() + provider=dict(type='dict') ) NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] + NXAPI_ENCODINGS = ['json', 
'xml'] def to_list(val): @@ -49,7 +50,7 @@ class Nxapi(object): self.module.params['url_password'] = module.params['password'] self.url = None - self.enable = None + self._nxapi_auth = None def _get_body(self, commands, command_type, encoding, version='1.2', chunk='0', sid=None): """Encodes a NXAPI JSON request message @@ -58,8 +59,9 @@ class Nxapi(object): commands = ' ;'.join(commands) if encoding not in NXAPI_ENCODINGS: - self.module.fail_json("Invalid encoding. Received %s. Expected one of %s" % - (encoding, ','.join(NXAPI_ENCODINGS))) + msg = 'invalid encoding, received %s, exceped one of %s' % \ + (encoding, ','.join(NXAPI_ENCODINGS)) + self.module_fail_json(msg=msg) msg = { 'version': version, @@ -92,39 +94,36 @@ class Nxapi(object): clist = to_list(commands) if command_type not in NXAPI_COMMAND_TYPES: - self.module.fail_json(msg="Invalid command_type. Received %s. Expected one of %s." % - (command_type, ','.join(NXAPI_COMMAND_TYPES))) + msg = 'invalid command_type, received %s, exceped one of %s' % \ + (command_type, ','.join(NXAPI_COMMAND_TYPES)) + self.module_fail_json(msg=msg) + + debug = dict() data = self._get_body(clist, command_type, encoding) data = self.module.jsonify(data) headers = {'Content-Type': 'application/json'} + if self._nxapi_auth: + headers['Cookie'] = self._nxapi_auth response, headers = fetch_url(self.module, self.url, data=data, headers=headers, method='POST') + self._nxapi_auth = headers.get('set-cookie') + if headers['status'] != 200: self.module.fail_json(**headers) response = self.module.from_json(response.read()) result = list() - try: - output = response['ins_api']['outputs']['output'] - if isinstance(output, list): - for item in response['ins_api']['outputs']['output']: - if item['code'] != '200': - self.module.fail_json(msg=item['msg'], command=item['input'], - code=item['code']) - else: - result.append(item['body']) - elif output['code'] != '200': - self.module.fail_json(msg=item['msg'], command=item['input'], - code=item['code']) + output = response['ins_api']['outputs']['output'] + for item in to_list(output): + if item['code'] != '200': + self.module.fail_json(**item) else: - result.append(output['body']) - except Exception: - self.module.fail_json(**headers) + result.append(item['body']) return result @@ -142,12 +141,11 @@ class Cli(object): username = self.module.params['username'] password = self.module.params['password'] - self.shell = Shell() - try: + self.shell = Shell() self.shell.open(host, port=port, username=username, password=password) except Exception, exc: - msg = 'failed to connecto to %s:%s - %s' % (host, port, str(exc)) + msg = 'failed to connect to %s:%s - %s' % (host, port, str(exc)) self.module.fail_json(msg=msg) def send(self, commands, encoding='text'): @@ -160,6 +158,11 @@ class NetworkModule(AnsibleModule): super(NetworkModule, self).__init__(*args, **kwargs) self.connection = None self._config = None + self._connected = False + + @property + def connected(self): + return self._connected @property def config(self): @@ -183,24 +186,31 @@ class NetworkModule(AnsibleModule): self.connection = Cli(self) self.connection.connect() + if self.params['transport'] == 'cli': - self.execute('terminal length 0') + self.connection.send('terminal length 0') + + self._connected = True + + + def configure_cli(self, commands): + commands = to_list(commands) + commands.insert(0, 'configure') + responses = self.execute(commands) + responses.pop(0) + return responses def configure(self, commands): commands = to_list(commands) if 
self.params['transport'] == 'cli': - commands.insert(0, 'configure') - responses = self.execute(commands) - responses.pop(0) + return self.configure_cli(commands) else: - responses = self.execute(commands, command_type='cli_conf') - return responses + return self.execute(commands, command_type='cli_conf') def execute(self, commands, **kwargs): - try: - return self.connection.send(commands, **kwargs) - except Exception, exc: - self.fail_json(msg=exc.message, commands=commands) + if not self.connected: + self.connect() + return self.connection.send(commands, **kwargs) def disconnect(self): self.connection.close() @@ -212,7 +222,6 @@ class NetworkModule(AnsibleModule): cmd = 'show running-config' if self.params.get('include_defaults'): cmd += ' all' - response = self.execute(cmd) return response[0] @@ -231,5 +240,4 @@ def get_module(**kwargs): if module.params['transport'] == 'cli' and not HAS_PARAMIKO: module.fail_json(msg='paramiko is required but does not appear to be installed') - module.connect() return module From f0e6d28815da7e730a39fd546649c06d0e7566c0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 14 Mar 2016 18:48:40 -0700 Subject: [PATCH 0977/1113] Make integration tests for fact gathering assert on failure --- test/integration/inventory | 3 + test/integration/test_gathering_facts.yml | 111 ++++++++++++++++++---- 2 files changed, 98 insertions(+), 16 deletions(-) diff --git a/test/integration/inventory b/test/integration/inventory index bee36ce022e..b833343839c 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -4,6 +4,9 @@ testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local # For testing delegate_to testhost3 ansible_ssh_host=127.0.0.3 testhost4 ansible_ssh_host=127.0.0.4 +# For testing fact gathering +facthost[0:7] ansible_host=1270.0.0.1 ansible_connection=local + # the following inline declarations are accompanied # by (preferred) group_vars/ and host_vars/ variables diff --git a/test/integration/test_gathering_facts.yml b/test/integration/test_gathering_facts.yml index eb39c84b597..8e93be0a8c5 100644 --- a/test/integration/test_gathering_facts.yml +++ b/test/integration/test_gathering_facts.yml @@ -1,37 +1,116 @@ --- -- hosts: localhost - tags: [ 'min' ] +- hosts: facthost0 + tags: [ 'fact_min' ] connection: local gather_subset: "!all" gather_facts: yes tasks: - - debug: var={{item}} - with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + - name: Test that only retrieving minimal facts work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: localhost - tags: [ 'network' ] +- hosts: facthost1 + tags: [ 'fact_network' ] connection: local gather_subset: "network" gather_facts: yes tasks: - - debug: var={{item}} - with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + - name: Test that retrieving network facts work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: localhost - tags: [ 'hardware' ] +- hosts: facthost2 + tags: [ 'fact_hardware' ] connection: local 
gather_subset: "hardware" gather_facts: yes tasks: - - debug: var={{item}} - with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + - name: Test that retrieving hardware facts work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: localhost - tags: [ 'virtual' ] +- hosts: facthost3 + tags: [ 'fact_virtual' ] connection: local gather_subset: "virtual" gather_facts: yes tasks: - - debug: var={{item}} - with_items: [ 'ansible_user_id', 'ansible_interfaces', 'ansible_mounts', 'ansible_virtualization_role' ] + - name: Test that retrieving virtualization facts work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + +- hosts: facthost4 + tags: [ 'fact_comma_string' ] + connection: local + gather_subset: "virtual,network" + gather_facts: yes + tasks: + - name: Test that retrieving virtualization and network as a string works + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + +- hosts: facthost5 + tags: [ 'fact_yaml_list' ] + connection: local + gather_subset: + - virtual + - network + gather_facts: yes + tasks: + - name: Test that retrieving virtualization and network as a string works + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + +- hosts: facthost6 + tags: [ 'fact_negation' ] + connection: local + gather_subset: "!hardware" + gather_facts: yes + tasks: + - name: Test that negation of fact subsets work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + +- hosts: facthost7 + tags: [ 'fact_mixed_negation_addition' ] + connection: local + gather_subset: "!hardware,network" + gather_facts: yes + tasks: + - name: Test that negation and additional subsets work together + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' + From ace0c9c5c2a97129840c50c630df866ffbc6e9c4 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Mon, 14 Mar 2016 18:47:21 -0700 Subject: [PATCH 0978/1113] fixes ansible-modules-code#3250 Sets the default value for transport to 'cli' --- lib/ansible/module_utils/eos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py index 37e62a4de27..cb1e4f41273 100644 --- a/lib/ansible/module_utils/eos.py +++ b/lib/ansible/module_utils/eos.py @@ -26,7 +26,7 @@ 
NET_COMMON_ARGS = dict(
     password=dict(no_log=True),
     authorize=dict(default=False, type='bool'),
     auth_pass=dict(no_log=True),
-    transport=dict(choices=['cli', 'eapi']),
+    transport=dict(default='cli', choices=['cli', 'eapi']),
     use_ssl=dict(default=True, type='bool'),
     provider=dict(type='dict')
 )

From 051b9e7b907694934a2df74b5bb67ccf1e1d1820 Mon Sep 17 00:00:00 2001
From: Brian Coca <brian.coca+git@gmail.com>
Date: Mon, 14 Mar 2016 22:33:46 -0400
Subject: [PATCH 0979/1113] added warning about additional requirements

---
 docsite/rst/intro_installation.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 0859c4ce2a7..03e6bd7dfdd 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -55,6 +55,10 @@ This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on.
    you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 unlimited``. Or just any time you see a "Too many open files" error.
 
+.. warning::
+
+   Please note that some modules and plugins have additional requirements. For modules, these need to be satisfied on the 'target' machine and should be listed in the module-specific docs.
+
 .. _managed_node_requirements:
 
 Managed Node Requirements

From f3b9449e079fd5403f25c18d7d71417a2fc0e639 Mon Sep 17 00:00:00 2001
From: Brian Coca <brian.coca+git@gmail.com>
Date: Mon, 14 Mar 2016 20:13:23 -0700
Subject: [PATCH 0980/1113] don't raise exceptions on bad hosts files

fixes #14969
---
 lib/ansible/module_utils/facts.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 1aa16c9feeb..835ebd74d80 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -770,7 +770,8 @@ class Facts(object):
                     for nameserver in tokens[1:]:
                         self.facts['dns']['nameservers'].append(nameserver)
                 elif tokens[0] == 'domain':
-                    self.facts['dns']['domain'] = tokens[1]
+                    if len(tokens) > 1:
+                        self.facts['dns']['domain'] = tokens[1]
                 elif tokens[0] == 'search':
                     self.facts['dns']['search'] = []
                     for suffix in tokens[1:]:
@@ -781,12 +782,13 @@
                         self.facts['dns']['sortlist'].append(address)
                 elif tokens[0] == 'options':
                     self.facts['dns']['options'] = {}
-                    for option in tokens[1:]:
-                        option_tokens = option.split(':', 1)
-                        if len(option_tokens) == 0:
-                            continue
-                        val = len(option_tokens) == 2 and option_tokens[1] or True
-                        self.facts['dns']['options'][option_tokens[0]] = val
+                    if len(tokens) > 1:
+                        for option in tokens[1:]:
+                            option_tokens = option.split(':', 1)
+                            if len(option_tokens) == 0:
+                                continue
+                            val = len(option_tokens) == 2 and option_tokens[1] or True
+                            self.facts['dns']['options'][option_tokens[0]] = val
 
     def _get_mount_size_facts(self, mountpoint):
         size_total = None

From 69764b5fbb3e61dabcf62b348bbf0a027ec77d7b Mon Sep 17 00:00:00 2001
From: Henrik Holmboe <henrik@holmboe.se>
Date: Tue, 15 Mar 2016 10:04:55 +0100
Subject: [PATCH 0981/1113] 2016 is the year

---
 RELEASES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/RELEASES.txt b/RELEASES.txt
index d04845c3926..871624a2595 100644
--- a/RELEASES.txt
+++ b/RELEASES.txt
@@ -9,8 +9,8 @@ Active Development
 Released
 ++++++++
 
-2.0.1 "Over the Hills and Far Away" 02-24-2015
-2.0.0 "Over the Hills and Far Away" 01-12-2015
+2.0.1 "Over the Hills and Far Away" 02-24-2016
+2.0.0 "Over the Hills and Far Away" 01-12-2016
 1.9.4 "Dancing In the Streets" 10-09-2015
1.9.3 "Dancing In the Streets" 09-03-2015 1.9.2 "Dancing In the Streets" 06-24-2015 From cd6d865e271183901d9dec5668f0c3adc478c933 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=8F=E6=81=BA=28Xia=20Kai=29?= <xiaket@gmail.com> Date: Tue, 15 Mar 2016 10:01:31 +0000 Subject: [PATCH 0982/1113] use __mro__ for plugin loading when we search for its base class. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This would relax the constraint a little bit, allowing subclassing existing plugins. Signed-off-by: 夏恺(Xia Kai) <xiaket@gmail.com> --- lib/ansible/plugins/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 139e5a7d612..afeab7a1038 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -329,7 +329,7 @@ class PluginLoader: obj = getattr(self._module_cache[path], self.class_name) else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) - if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: + if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__mro__]: return None return obj From 86080fbaa9035b9384a9b42cc5376e7db2c01d29 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 15 Mar 2016 07:49:25 -0700 Subject: [PATCH 0983/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c86a0ef84a4..a8841e68342 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c86a0ef84a46133814bf6f240237640139e09fad +Subproject commit a8841e6834231ed63f8df4bab9d5996d9a78f0bf diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 33a557cc59d..45bba8ec64e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 33a557cc59dfce486634f4a0c0f0db4431afb0f7 +Subproject commit 45bba8ec64ebf908dd2e480ed620ee98d5665e53 From 9a6a5a551697546fd359991f107e5559f9619013 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 15 Mar 2016 11:58:23 -0700 Subject: [PATCH 0984/1113] add a fact gathering check for the default of all --- test/integration/inventory | 2 +- test/integration/test_gathering_facts.yml | 31 ++++++++++++++++++----- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/test/integration/inventory b/test/integration/inventory index b833343839c..427aa3dc852 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -5,7 +5,7 @@ testhost2 ansible_ssh_host=127.0.0.1 ansible_connection=local testhost3 ansible_ssh_host=127.0.0.3 testhost4 ansible_ssh_host=127.0.0.4 # For testing fact gathering -facthost[0:7] ansible_host=1270.0.0.1 ansible_connection=local +facthost[0:8] ansible_host=1270.0.0.1 ansible_connection=local # the following inline declarations are accompanied diff --git a/test/integration/test_gathering_facts.yml b/test/integration/test_gathering_facts.yml index 8e93be0a8c5..c8a44ea0a7f 100644 --- a/test/integration/test_gathering_facts.yml +++ b/test/integration/test_gathering_facts.yml @@ -1,6 +1,23 @@ --- - hosts: facthost0 + tags: [ 'fact_min' ] + connection: local + gather_subset: "all" + gather_facts: yes + tasks: + - setup: + register: facts + - debug: var=facts + - name: Test that 
only retrieving minimal facts work + assert: + that: + - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_mounts|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + +- hosts: facthost1 tags: [ 'fact_min' ] connection: local gather_subset: "!all" @@ -14,7 +31,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: facthost1 +- hosts: facthost2 tags: [ 'fact_network' ] connection: local gather_subset: "network" @@ -28,7 +45,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: facthost2 +- hosts: facthost3 tags: [ 'fact_hardware' ] connection: local gather_subset: "hardware" @@ -42,7 +59,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" != "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' -- hosts: facthost3 +- hosts: facthost4 tags: [ 'fact_virtual' ] connection: local gather_subset: "virtual" @@ -56,7 +73,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' -- hosts: facthost4 +- hosts: facthost5 tags: [ 'fact_comma_string' ] connection: local gather_subset: "virtual,network" @@ -70,7 +87,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' -- hosts: facthost5 +- hosts: facthost6 tags: [ 'fact_yaml_list' ] connection: local gather_subset: @@ -86,7 +103,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' -- hosts: facthost6 +- hosts: facthost7 tags: [ 'fact_negation' ] connection: local gather_subset: "!hardware" @@ -100,7 +117,7 @@ - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' -- hosts: facthost7 +- hosts: facthost8 tags: [ 'fact_mixed_negation_addition' ] connection: local gather_subset: "!hardware,network" From 88310a7f280350df2d72a72c4f46882d8fcfa323 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 15 Mar 2016 15:27:51 -0400 Subject: [PATCH 0985/1113] Adding iproute to certain RH-based docker images --- test/utils/docker/centos7/Dockerfile | 1 + test/utils/docker/fedora-rawhide/Dockerfile | 1 + test/utils/docker/fedora23/Dockerfile | 1 + 3 files changed, 3 insertions(+) diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 69450e0af19..6e4f93dcaf7 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -14,6 +14,7 @@ RUN yum -y install \ epel-release \ file \ git \ + iproute \ make \ mercurial \ rubygems \ diff --git a/test/utils/docker/fedora-rawhide/Dockerfile b/test/utils/docker/fedora-rawhide/Dockerfile index 6ad09779aa8..e587177607a 100644 --- a/test/utils/docker/fedora-rawhide/Dockerfile +++ b/test/utils/docker/fedora-rawhide/Dockerfile @@ -15,6 +15,7 @@ RUN dnf -y install \ findutils \ git \ glibc-locale-source \ + iproute \ make \ mercurial \ procps \ diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index 3695b9a613b..d382794359a 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -15,6 +15,7 @@ RUN dnf -y install \ findutils \ glibc-common \ git \ + iproute \ make \ 
mercurial \ procps \ From ea58ccfd5e75e1609ad24f0c0be4212883a89f7e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 15 Mar 2016 12:31:26 -0700 Subject: [PATCH 0986/1113] Make the fact gathering give a little more information on failure --- test/integration/test_gathering_facts.yml | 74 +++++++++++------------ 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/test/integration/test_gathering_facts.yml b/test/integration/test_gathering_facts.yml index c8a44ea0a7f..ed3fa841bab 100644 --- a/test/integration/test_gathering_facts.yml +++ b/test/integration/test_gathering_facts.yml @@ -9,13 +9,13 @@ - setup: register: facts - debug: var=facts - - name: Test that only retrieving minimal facts work + - name: Test that retrieving all facts works assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" != "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" != "UNDEF_VIRT"' - hosts: facthost1 tags: [ 'fact_min' ] @@ -26,10 +26,10 @@ - name: Test that only retrieving minimal facts work assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" == "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" == "UNDEF_VIRT"' - hosts: facthost2 tags: [ 'fact_network' ] @@ -40,10 +40,10 @@ - name: Test that retrieving network facts work assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" == "UNDEF_VIRT"' - hosts: facthost3 tags: [ 'fact_hardware' ] @@ -54,10 +54,10 @@ - name: Test that retrieving hardware facts work assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" == "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" != "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" == "UNDEF_VIRT"' - hosts: facthost4 tags: [ 'fact_virtual' ] @@ -68,10 +68,10 @@ - name: Test that retrieving virtualization facts work assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" == "UNDEF"' - - '"{{ 
ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" == "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" != "UNDEF_VIRT"' - hosts: facthost5 tags: [ 'fact_comma_string' ] @@ -82,10 +82,10 @@ - name: Test that retrieving virtualization and network as a string works assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" != "UNDEF_VIRT"' - hosts: facthost6 tags: [ 'fact_yaml_list' ] @@ -98,10 +98,10 @@ - name: Test that retrieving virtualization and network as a string works assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" != "UNDEF_VIRT"' - hosts: facthost7 tags: [ 'fact_negation' ] @@ -112,10 +112,10 @@ - name: Test that negation of fact subsets work assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" != "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" != "UNDEF_VIRT"' - hosts: facthost8 tags: [ 'fact_mixed_negation_addition' ] @@ -126,8 +126,8 @@ - name: Test that negation and additional subsets work together assert: that: - - '"{{ ansible_user_id|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_interfaces|default("UNDEF") }}" != "UNDEF"' - - '"{{ ansible_mounts|default("UNDEF") }}" == "UNDEF"' - - '"{{ ansible_virtualization_role|default("UNDEF") }}" == "UNDEF"' + - '"{{ ansible_user_id|default("UNDEF_MIN") }}" != "UNDEF_MIN"' + - '"{{ ansible_interfaces|default("UNDEF_NET") }}" != "UNDEF_NET"' + - '"{{ ansible_mounts|default("UNDEF_HW") }}" == "UNDEF_HW"' + - '"{{ ansible_virtualization_role|default("UNDEF_VIRT") }}" == "UNDEF_VIRT"' From d9f7589460753522f7611b72b687e4ae3481d1a8 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 20:19:10 +0100 Subject: [PATCH 0987/1113] cloudstack: new integration tests test_cs_configuration --- test/integration/cloudstack.yml | 2 + .../test_cs_configuration/defaults/main.yml | 5 + .../roles/test_cs_configuration/meta/main.yml | 3 + .../test_cs_configuration/tasks/account.yml | 59 +++++++ 
.../test_cs_configuration/tasks/cluster.yml | 59 +++++++ .../test_cs_configuration/tasks/main.yml | 162 ++++++++++++++++++ .../test_cs_configuration/tasks/storage.yml | 59 +++++++ .../test_cs_configuration/tasks/zone.yml | 59 +++++++ 8 files changed, 408 insertions(+) create mode 100644 test/integration/roles/test_cs_configuration/defaults/main.yml create mode 100644 test/integration/roles/test_cs_configuration/meta/main.yml create mode 100644 test/integration/roles/test_cs_configuration/tasks/account.yml create mode 100644 test/integration/roles/test_cs_configuration/tasks/cluster.yml create mode 100644 test/integration/roles/test_cs_configuration/tasks/main.yml create mode 100644 test/integration/roles/test_cs_configuration/tasks/storage.yml create mode 100644 test/integration/roles/test_cs_configuration/tasks/zone.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 3ad4ed08349..bc275ad3bbd 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -23,3 +23,5 @@ - { role: test_cs_firewall, tags: test_cs_firewall } - { role: test_cs_loadbalancer_rule, tags: test_cs_loadbalancer_rule } - { role: test_cs_volume, tags: test_cs_volume } + - { role: test_cs_configuration, tags: test_cs_configuration } + diff --git a/test/integration/roles/test_cs_configuration/defaults/main.yml b/test/integration/roles/test_cs_configuration/defaults/main.yml new file mode 100644 index 00000000000..2c68b5099aa --- /dev/null +++ b/test/integration/roles/test_cs_configuration/defaults/main.yml @@ -0,0 +1,5 @@ +--- +test_cs_configuration_storage: PS0 +test_cs_configuration_cluster: C0 +test_cs_configuration_account: admin +test_cs_configuration_zone: Sandbox-simulator diff --git a/test/integration/roles/test_cs_configuration/meta/main.yml b/test/integration/roles/test_cs_configuration/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_configuration/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_configuration/tasks/account.yml b/test/integration/roles/test_cs_configuration/tasks/account.yml new file mode 100644 index 00000000000..853fbf81a3a --- /dev/null +++ b/test/integration/roles/test_cs_configuration/tasks/account.yml @@ -0,0 +1,59 @@ +--- +- name: test configuration account + cs_configuration: + name: allow.public.user.templates + account: "{{ test_cs_configuration_account }}" + value: true + register: config +- name: verify test configuration storage + assert: + that: + - config|success + +- name: test update configuration account + cs_configuration: + name: allow.public.user.templates + account: "{{ test_cs_configuration_account }}" + value: false + register: config +- name: verify update configuration account + assert: + that: + - config|success + - config|changed + - config.value == "false" + - config.name == "allow.public.user.templates" + - config.scope == "account" + - config.account == "{{ test_cs_configuration_account }}" + +- name: test update configuration account idempotence + cs_configuration: + name: allow.public.user.templates + account: "{{ test_cs_configuration_account }}" + value: false + register: config +- name: verify update configuration account idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "false" + - config.name == "allow.public.user.templates" + - config.scope == "account" + - config.account == "{{ test_cs_configuration_account }}" + +- name: 
test reset configuration account + cs_configuration: + name: allow.public.user.templates + account: "{{ test_cs_configuration_account }}" + value: true + register: config +- name: verify update configuration account + assert: + that: + - config|success + - config|changed + - config.value == "true" + - config.name == "allow.public.user.templates" + - config.scope == "account" + - config.account == "{{ test_cs_configuration_account }}" diff --git a/test/integration/roles/test_cs_configuration/tasks/cluster.yml b/test/integration/roles/test_cs_configuration/tasks/cluster.yml new file mode 100644 index 00000000000..c3328e41d8f --- /dev/null +++ b/test/integration/roles/test_cs_configuration/tasks/cluster.yml @@ -0,0 +1,59 @@ +--- +- name: test configuration cluster + cs_configuration: + name: cpu.overprovisioning.factor + cluster: "{{ test_cs_configuration_cluster }}" + value: 1.0 + register: config +- name: verify test configuration cluster + assert: + that: + - config|success + +- name: test update configuration cluster + cs_configuration: + name: cpu.overprovisioning.factor + cluster: "{{ test_cs_configuration_cluster }}" + value: 2.0 + register: config +- name: verify update configuration cluster + assert: + that: + - config|success + - config|changed + - config.value == "2.0" + - config.name == "cpu.overprovisioning.factor" + - config.scope == "cluster" + - config.cluster == "{{ test_cs_configuration_cluster }}" + +- name: test update configuration cluster idempotence + cs_configuration: + name: cpu.overprovisioning.factor + cluster: "{{ test_cs_configuration_cluster }}" + value: 2.0 + register: config +- name: verify update configuration cluster idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "2.0" + - config.name == "cpu.overprovisioning.factor" + - config.scope == "cluster" + - config.cluster == "{{ test_cs_configuration_cluster }}" + +- name: test reset configuration cluster + cs_configuration: + name: cpu.overprovisioning.factor + cluster: "{{ test_cs_configuration_cluster }}" + value: 1.0 + register: config +- name: verify reset configuration cluster + assert: + that: + - config|success + - config|changed + - config.value == "1.0" + - config.name == "cpu.overprovisioning.factor" + - config.scope == "cluster" + - config.cluster == "{{ test_cs_configuration_cluster }}" diff --git a/test/integration/roles/test_cs_configuration/tasks/main.yml b/test/integration/roles/test_cs_configuration/tasks/main.yml new file mode 100644 index 00000000000..5fdba116809 --- /dev/null +++ b/test/integration/roles/test_cs_configuration/tasks/main.yml @@ -0,0 +1,162 @@ +--- +- name: test fail if missing name + cs_configuration: + register: config + ignore_errors: true +- name: verify results of fail if missing arguments + assert: + that: + - config|failed + - "config.msg == 'missing required arguments: value,name'" + +- name: test configuration + cs_configuration: + name: network.loadbalancer.haproxy.stats.visibility + value: global + register: config +- name: verify test configuration + assert: + that: + - config|success + +- name: test update configuration string + cs_configuration: + name: network.loadbalancer.haproxy.stats.visibility + value: all + register: config +- name: verify test update configuration string + assert: + that: + - config|success + - config|changed + - config.value == "all" + - config.name == "network.loadbalancer.haproxy.stats.visibility" + +- name: test update configuration string idempotence + cs_configuration: + name: 
network.loadbalancer.haproxy.stats.visibility + value: all + register: config +- name: verify test update configuration string idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "all" + - config.name == "network.loadbalancer.haproxy.stats.visibility" + +- name: test reset configuration string + cs_configuration: + name: network.loadbalancer.haproxy.stats.visibility + value: global + register: config +- name: verify test reset configuration string + assert: + that: + - config|success + - config|changed + - config.value == "global" + - config.name == "network.loadbalancer.haproxy.stats.visibility" + +- name: test configuration + cs_configuration: + name: vmware.recycle.hung.wokervm + value: false + register: config +- name: verify test configuration + assert: + that: + - config|success + +- name: test update configuration bool + cs_configuration: + name: vmware.recycle.hung.wokervm + value: true + register: config +- name: verify test update configuration bool + assert: + that: + - config|success + - config|changed + - config.value == "true" + - config.name == "vmware.recycle.hung.wokervm" + +- name: test update configuration bool idempotence + cs_configuration: + name: vmware.recycle.hung.wokervm + value: true + register: config +- name: verify test update configuration bool idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "true" + - config.name == "vmware.recycle.hung.wokervm" + +- name: test reset configuration bool + cs_configuration: + name: vmware.recycle.hung.wokervm + value: false + register: config +- name: verify test reset configuration bool + assert: + that: + - config|success + - config|changed + - config.value == "false" + - config.name == "vmware.recycle.hung.wokervm" + +- name: test configuration + cs_configuration: + name: agent.load.threshold + value: 0.7 + register: config +- name: verify test configuration + assert: + that: + - config|success + +- name: test update configuration float + cs_configuration: + name: agent.load.threshold + value: 0.81 + register: config +- name: verify update configuration float + assert: + that: + - config|success + - config|changed + - config.value == "0.81" + - config.name == "agent.load.threshold" + +- name: test update configuration float idempotence + cs_configuration: + name: agent.load.threshold + value: 0.81 + register: config +- name: verify update configuration float idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "0.81" + - config.name == "agent.load.threshold" + +- name: reset configuration float + cs_configuration: + name: agent.load.threshold + value: 0.7 + register: config +- name: verify reset configuration float + assert: + that: + - config|success + - config|changed + - config.value == "0.7" + - config.name == "agent.load.threshold" + +- include: storage.yml +- include: account.yml +- include: zone.yml +- include: cluster.yml diff --git a/test/integration/roles/test_cs_configuration/tasks/storage.yml b/test/integration/roles/test_cs_configuration/tasks/storage.yml new file mode 100644 index 00000000000..8201bae0572 --- /dev/null +++ b/test/integration/roles/test_cs_configuration/tasks/storage.yml @@ -0,0 +1,59 @@ +--- +- name: test configuration storage + cs_configuration: + name: storage.overprovisioning.factor + storage: "{{ test_cs_configuration_storage }}" + value: 2.0 + register: config +- name: verify test configuration storage + assert: + that: + - config|success + +- name: test update 
configuration storage + cs_configuration: + name: storage.overprovisioning.factor + storage: "{{ test_cs_configuration_storage }}" + value: 3.0 + register: config +- name: verify update configuration storage + assert: + that: + - config|success + - config|changed + - config.value == "3.0" + - config.name == "storage.overprovisioning.factor" + - config.scope == "storagepool" + - config.storage == "{{ test_cs_configuration_storage }}" + +- name: test update configuration storage idempotence + cs_configuration: + name: storage.overprovisioning.factor + storage: "{{ test_cs_configuration_storage }}" + value: 3.0 + register: config +- name: verify update configuration storage idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "3.0" + - config.name == "storage.overprovisioning.factor" + - config.scope == "storagepool" + - config.storage == "{{ test_cs_configuration_storage }}" + +- name: test reset configuration storage + cs_configuration: + name: storage.overprovisioning.factor + storage: "{{ test_cs_configuration_storage }}" + value: 2.0 + register: config +- name: verify reset configuration storage + assert: + that: + - config|success + - config|changed + - config.value == "2.0" + - config.name == "storage.overprovisioning.factor" + - config.scope == "storagepool" + - config.storage == "{{ test_cs_configuration_storage }}" diff --git a/test/integration/roles/test_cs_configuration/tasks/zone.yml b/test/integration/roles/test_cs_configuration/tasks/zone.yml new file mode 100644 index 00000000000..423f885c7c0 --- /dev/null +++ b/test/integration/roles/test_cs_configuration/tasks/zone.yml @@ -0,0 +1,59 @@ +--- +- name: test configuration zone + cs_configuration: + name: use.external.dns + zone: "{{ test_cs_configuration_zone }}" + value: false + register: config +- name: verify test configuration zone + assert: + that: + - config|success + +- name: test update configuration zone + cs_configuration: + name: use.external.dns + zone: "{{ test_cs_configuration_zone }}" + value: true + register: config +- name: verify update configuration zone + assert: + that: + - config|success + - config|changed + - config.value == "true" + - config.name == "use.external.dns" + - config.scope == "zone" + - config.zone == "{{ test_cs_configuration_zone }}" + +- name: test update configuration zone idempotence + cs_configuration: + name: use.external.dns + zone: "{{ test_cs_configuration_zone }}" + value: true + register: config +- name: verify update configuration zone idempotence + assert: + that: + - config|success + - not config|changed + - config.value == "true" + - config.name == "use.external.dns" + - config.scope == "zone" + - config.zone == "{{ test_cs_configuration_zone }}" + +- name: test reset configuration zone + cs_configuration: + name: use.external.dns + zone: "{{ test_cs_configuration_zone }}" + value: false + register: config +- name: verify reset configuration zone + assert: + that: + - config|success + - config|changed + - config.value == "false" + - config.name == "use.external.dns" + - config.scope == "zone" + - config.zone == "{{ test_cs_configuration_zone }}" From ae6d2a5602488e63f181e360e9b9c4d27c31d989 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 20:19:56 +0100 Subject: [PATCH 0988/1113] cloudstack: new integration tests test_cs_instance_facts --- test/integration/cloudstack.yml | 1 + .../test_cs_instance_facts/defaults/main.yml | 3 + .../test_cs_instance_facts/meta/main.yml | 3 + 
.../test_cs_instance_facts/tasks/main.yml | 55 +++++++++++++++++++ 4 files changed, 62 insertions(+) create mode 100644 test/integration/roles/test_cs_instance_facts/defaults/main.yml create mode 100644 test/integration/roles/test_cs_instance_facts/meta/main.yml create mode 100644 test/integration/roles/test_cs_instance_facts/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index bc275ad3bbd..9f64f9ec94c 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -23,5 +23,6 @@ - { role: test_cs_firewall, tags: test_cs_firewall } - { role: test_cs_loadbalancer_rule, tags: test_cs_loadbalancer_rule } - { role: test_cs_volume, tags: test_cs_volume } + - { role: test_cs_instance_facts, tags: test_cs_instance_facts } - { role: test_cs_configuration, tags: test_cs_configuration } diff --git a/test/integration/roles/test_cs_instance_facts/defaults/main.yml b/test/integration/roles/test_cs_instance_facts/defaults/main.yml new file mode 100644 index 00000000000..e393e60f4f0 --- /dev/null +++ b/test/integration/roles/test_cs_instance_facts/defaults/main.yml @@ -0,0 +1,3 @@ +--- +test_cs_instance_template: CentOS 5.3(64-bit) no GUI (Simulator) +test_cs_instance_offering_1: Small Instance diff --git a/test/integration/roles/test_cs_instance_facts/meta/main.yml b/test/integration/roles/test_cs_instance_facts/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_instance_facts/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_instance_facts/tasks/main.yml b/test/integration/roles/test_cs_instance_facts/tasks/main.yml new file mode 100644 index 00000000000..af35712aa5c --- /dev/null +++ b/test/integration/roles/test_cs_instance_facts/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: setup ssh key + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey + register: sshkey +- name: verify setup ssh key + assert: + that: + - sshkey|success + +- name: setup affinity group + cs_affinitygroup: name={{ cs_resource_prefix }}-ag + register: ag +- name: verify setup affinity group + assert: + that: + - ag|success + +- name: setup security group + cs_securitygroup: name={{ cs_resource_prefix }}-sg + register: sg +- name: verify setup security group + assert: + that: + - sg|success + +- name: setup instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance + assert: + that: + - instance|success + +- name: test instance facts + cs_instance_facts: + name: "{{ cs_resource_prefix }}-vm" + register: instance_facts +- name: verify test instance facts + assert: + that: + - instance_facts|success + - not instance_facts|changed + - cloudstack_instance.id == instance.id + - cloudstack_instance.domain == instance.domain + - cloudstack_instance.account == instance.account + - cloudstack_instance.zone == instance.zone + - cloudstack_instance.name == instance.name + - cloudstack_instance.service_offering == instance.service_offering From d50026b108f3bafc555441c140b07de9bdf510c8 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 20:20:53 +0100 Subject: [PATCH 0989/1113] cloudstack: new integration tests 
test_cs_pod --- test/integration/cloudstack.yml | 2 +- .../roles/test_cs_pod/meta/main.yml | 3 + .../roles/test_cs_pod/tasks/main.yml | 210 ++++++++++++++++++ 3 files changed, 214 insertions(+), 1 deletion(-) create mode 100644 test/integration/roles/test_cs_pod/meta/main.yml create mode 100644 test/integration/roles/test_cs_pod/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 9f64f9ec94c..3f732d9fe43 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -25,4 +25,4 @@ - { role: test_cs_volume, tags: test_cs_volume } - { role: test_cs_instance_facts, tags: test_cs_instance_facts } - { role: test_cs_configuration, tags: test_cs_configuration } - + - { role: test_cs_pod, tags: test_cs_pod } diff --git a/test/integration/roles/test_cs_pod/meta/main.yml b/test/integration/roles/test_cs_pod/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_pod/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_pod/tasks/main.yml b/test/integration/roles/test_cs_pod/tasks/main.yml new file mode 100644 index 00000000000..6f84eb783b6 --- /dev/null +++ b/test/integration/roles/test_cs_pod/tasks/main.yml @@ -0,0 +1,210 @@ +--- +- name: setup pod is absent + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + state: absent + register: pod +- name: verify setup pod is absent + assert: + that: + - pod|success + +- name: setup zone is present + cs_zone: + name: "{{ cs_resource_prefix }}-zone" + dns1: 8.8.8.8 + dns2: 8.8.4.4 + network_type: basic + register: zone +- name: verify setup zone is present + assert: + that: + - zone|success + +- name: test fail if missing name + cs_pod: + register: pod + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - pod|failed + - "pod.msg == 'missing required arguments: name'" + +- name: test create pod + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + start_ip: 10.100.10.101 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + register: pod_origin +- name: verify test create pod + assert: + that: + - pod_origin|changed + - pod_origin.allocation_state == "Enabled" + - pod_origin.start_ip == "10.100.10.101" + - pod_origin.end_ip == "10.100.10.254" + - pod_origin.gateway == "10.100.10.1" + - pod_origin.netmask == "255.255.255.0" + - pod_origin.zone == "{{ cs_resource_prefix }}-zone" + +- name: test create pod idempotence + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + start_ip: 10.100.10.101 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + register: pod +- name: verify test create pod idempotence + assert: + that: + - not pod|changed + - pod.allocation_state == "Enabled" + - pod.start_ip == "10.100.10.101" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test update pod + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + start_ip: 10.100.10.102 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + register: pod +- name: verify test update pod + assert: + that: + - pod|changed + - pod.allocation_state == "Enabled" + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test update pod 
idempotence + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + start_ip: 10.100.10.102 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + register: pod +- name: verify test update pod idempotence + assert: + that: + - not pod|changed + - pod.allocation_state == "Enabled" + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test disable pod + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: disabled + register: pod +- name: verify test enable pod + assert: + that: + - pod|changed + - pod.allocation_state == "Disabled" + - pod.id == pod_origin.id + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test disable pod idempotence + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: disabled + register: pod +- name: verify test enable pod idempotence + assert: + that: + - not pod|changed + - pod.allocation_state == "Disabled" + - pod.id == pod_origin.id + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test enable pod + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: enabled + register: pod +- name: verify test disable pod + assert: + that: + - pod|changed + - pod.allocation_state == "Enabled" + - pod.id == pod_origin.id + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + + +- name: test enable pod idempotence + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: enabled + register: pod +- name: verify test enabled pod idempotence + assert: + that: + - not pod|changed + - pod.allocation_state == "Enabled" + - pod.id == pod_origin.id + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test absent pod + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: absent + register: pod +- name: verify test create pod + assert: + that: + - pod|changed + - pod.id == pod_origin.id + - pod.allocation_state == "Enabled" + - pod.start_ip == "10.100.10.102" + - pod.end_ip == "10.100.10.254" + - pod.gateway == "10.100.10.1" + - pod.netmask == "255.255.255.0" + - pod.zone == "{{ cs_resource_prefix }}-zone" + +- name: test absent pod idempotence + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + state: absent + register: pod +- name: verify test absent pod idempotence + assert: + that: + - not pod|changed From 4d6a15ebc36e5b3cc088fffed2353f6457552735 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 20:21:13 +0100 Subject: [PATCH 0990/1113] cloudstack: new integration tests test_cluster --- test/integration/cloudstack.yml | 1 + .../roles/test_cs_cluster/meta/main.yml | 3 + .../roles/test_cs_cluster/tasks/main.yml | 211 ++++++++++++++++++ 3 files changed, 215 insertions(+) create 
mode 100644 test/integration/roles/test_cs_cluster/meta/main.yml create mode 100644 test/integration/roles/test_cs_cluster/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 3f732d9fe43..99d08bb0cfc 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -26,3 +26,4 @@ - { role: test_cs_instance_facts, tags: test_cs_instance_facts } - { role: test_cs_configuration, tags: test_cs_configuration } - { role: test_cs_pod, tags: test_cs_pod } + - { role: test_cs_cluster, tags: test_cs_cluster } diff --git a/test/integration/roles/test_cs_cluster/meta/main.yml b/test/integration/roles/test_cs_cluster/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_cluster/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_cluster/tasks/main.yml b/test/integration/roles/test_cs_cluster/tasks/main.yml new file mode 100644 index 00000000000..bfaa09805d5 --- /dev/null +++ b/test/integration/roles/test_cs_cluster/tasks/main.yml @@ -0,0 +1,211 @@ +--- +- name: setup cluster is absent + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + state: absent + register: cluster +- name: verify setup cluster is absent + assert: + that: + - cluster|success + +- name: setup zone is present + cs_zone: + name: "{{ cs_resource_prefix }}-zone" + dns1: 8.8.8.8 + dns2: 8.8.4.4 + network_type: basic + register: zone +- name: verify setup zone is present + assert: + that: + - zone|success + +- name: setup pod is preset + cs_pod: + name: "{{ cs_resource_prefix }}-pod" + zone: "{{ cs_resource_prefix }}-zone" + start_ip: 10.100.10.101 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + register: pod +- name: verify setup pod is preset + assert: + that: + - pod|success + +- name: test fail if missing name + cs_cluster: + register: cluster + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - cluster|failed + - "cluster.msg == 'missing required arguments: name'" + +- name: test create cluster + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + zone: "{{ cs_resource_prefix }}-zone" + hypervisor: simulator + cluster_type: CloudManaged + register: cluster_origin + tags: disable +- name: verify test create cluster + assert: + that: + - cluster_origin|changed + - cluster_origin.name == "{{ cs_resource_prefix }}-cluster" + - cluster_origin.zone == "{{ cs_resource_prefix }}-zone" + - cluster_origin.allocation_state == "Enabled" + - cluster_origin.hypervisor == "Simulator" + - cluster_origin.cluster_type == "CloudManaged" + +- name: test create cluster idempotence + cs_cluster: + name: "{{ cs_resource_prefix }}-Cluster" + zone: "{{ cs_resource_prefix }}-Zone" + hypervisor: Simulator + cluster_type: CloudManaged + register: cluster +- name: verify test create cluster idempotence + assert: + that: + - cluster.id == cluster_origin.id + - not cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster_origin.hypervisor == "Simulator" + - cluster.cluster_type == "CloudManaged" + +- name: test update cluster + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + zone: "{{ cs_resource_prefix }}-zone" + hypervisor: simulator + cluster_type: ExternalManaged + register: cluster +- name: verify test update cluster + assert: + that: + - cluster|changed + - cluster.name == "{{ 
cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + - cluster.id == cluster_origin.id + +- name: test update cluster idempotence + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + zone: "{{ cs_resource_prefix }}-zone" + hypervisor: simulator + cluster_type: ExternalManaged + register: cluster +- name: verify test update cluster idempotence + assert: + that: + - not cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + - cluster.id == cluster_origin.id + +- name: test disable cluster + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + state: disabled + register: cluster + tags: disable +- name: verify test disable cluster + assert: + that: + - cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Disabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + - cluster.id == cluster_origin.id + tags: disable + +- name: test disable cluster idempotence + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + state: disabled + register: cluster + tags: disable +- name: verify test disable cluster idempotence + assert: + that: + - not cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Disabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + tags: disable + +- name: test enable cluster + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + state: enabled + register: cluster +- name: verify test enable cluster + assert: + that: + - cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + - cluster.id == cluster_origin.id + +- name: test enable cluster idempotence + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + state: enabled + register: cluster +- name: verify test enable cluster idempotence + assert: + that: + - not cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster.hypervisor == "Simulator" + - cluster.cluster_type == "ExternalManaged" + - cluster.id == cluster_origin.id + +- name: test remove cluster + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + zone: "{{ cs_resource_prefix }}-zone" + state: absent + register: cluster +- name: verify test remove cluster + assert: + that: + - cluster.id == cluster_origin.id + - cluster|changed + - cluster.name == "{{ cs_resource_prefix }}-cluster" + - cluster.zone == "{{ cs_resource_prefix }}-zone" + - cluster.allocation_state == "Enabled" + - cluster_origin.hypervisor == "Simulator" + +- name: test remove cluster idempotence + cs_cluster: + name: "{{ cs_resource_prefix }}-cluster" + zone: "{{ cs_resource_prefix }}-zone" + state: absent + register: cluster +- name: verify test remove cluster idempotence + assert: + that: + - not cluster|changed From 
5d5d905e36dd9e9be7cc4eabb859df77e3e50f6e Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 2 Feb 2016 20:21:30 +0100 Subject: [PATCH 0991/1113] cloudstack: new integration tests test_cs_resourcelimit --- test/integration/cloudstack.yml | 1 + .../roles/test_cs_resourcelimit/meta/main.yml | 3 + .../roles/test_cs_resourcelimit/tasks/cpu.yml | 76 +++++++++++++++++++ .../test_cs_resourcelimit/tasks/instance.yml | 76 +++++++++++++++++++ .../test_cs_resourcelimit/tasks/main.yml | 61 +++++++++++++++ 5 files changed, 217 insertions(+) create mode 100644 test/integration/roles/test_cs_resourcelimit/meta/main.yml create mode 100644 test/integration/roles/test_cs_resourcelimit/tasks/cpu.yml create mode 100644 test/integration/roles/test_cs_resourcelimit/tasks/instance.yml create mode 100644 test/integration/roles/test_cs_resourcelimit/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 99d08bb0cfc..9104237bfd1 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -27,3 +27,4 @@ - { role: test_cs_configuration, tags: test_cs_configuration } - { role: test_cs_pod, tags: test_cs_pod } - { role: test_cs_cluster, tags: test_cs_cluster } + - { role: test_cs_resourcelimit, tags: test_cs_resourcelimit } diff --git a/test/integration/roles/test_cs_resourcelimit/meta/main.yml b/test/integration/roles/test_cs_resourcelimit/meta/main.yml new file mode 100644 index 00000000000..03e38bd4f7a --- /dev/null +++ b/test/integration/roles/test_cs_resourcelimit/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_resourcelimit/tasks/cpu.yml b/test/integration/roles/test_cs_resourcelimit/tasks/cpu.yml new file mode 100644 index 00000000000..5faa6a9233c --- /dev/null +++ b/test/integration/roles/test_cs_resourcelimit/tasks/cpu.yml @@ -0,0 +1,76 @@ +--- +- name: setup cpu limits account + cs_resourcelimit: + type: cpu + limit: 20 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify setup cpu limits account + assert: + that: + - rl|success + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 20 + - rl.resource_type == "cpu" + +- name: set cpu limits for domain + cs_resourcelimit: + type: cpu + limit: 12 + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set cpu limits for domain + assert: + that: + - rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.limit == 12 + - rl.resource_type == "cpu" + +- name: set cpu limits for domain idempotence + cs_resourcelimit: + type: cpu + limit: 12 + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set cpu limits for domain + assert: + that: + - not rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.limit == 12 + - rl.resource_type == "cpu" + +- name: set cpu limits for account + cs_resourcelimit: + type: cpu + limit: 10 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set cpu limits for account + assert: + that: + - rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 10 + - rl.resource_type == "cpu" + +- name: set cpu limits for account idempotence + cs_resourcelimit: + type: cpu + limit: 10 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + 
register: rl +- name: verify set cpu limits for account idempotence + assert: + that: + - not rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 10 + - rl.resource_type == "cpu" diff --git a/test/integration/roles/test_cs_resourcelimit/tasks/instance.yml b/test/integration/roles/test_cs_resourcelimit/tasks/instance.yml new file mode 100644 index 00000000000..9fea9a3545b --- /dev/null +++ b/test/integration/roles/test_cs_resourcelimit/tasks/instance.yml @@ -0,0 +1,76 @@ +--- +- name: setup instance limits account + cs_resourcelimit: + type: instance + limit: 20 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify setup instance limits account + assert: + that: + - rl|success + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 20 + - rl.resource_type == "instance" + +- name: set instance limits for domain + cs_resourcelimit: + type: instance + limit: 12 + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set instance limits for domain + assert: + that: + - rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.limit == 12 + - rl.resource_type == "instance" + +- name: set instance limits for domain idempotence + cs_resourcelimit: + type: instance + limit: 12 + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set instance limits for domain + assert: + that: + - not rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.limit == 12 + - rl.resource_type == "instance" + +- name: set instance limits for account + cs_resourcelimit: + type: instance + limit: 10 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set instance limits for account + assert: + that: + - rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 10 + - rl.resource_type == "instance" + +- name: set instance limits for account idempotence + cs_resourcelimit: + type: instance + limit: 10 + account: "{{ cs_resource_prefix }}_user" + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify set instance limits for account idempotence + assert: + that: + - not rl|changed + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.account == "{{ cs_resource_prefix }}_user" + - rl.limit == 10 + - rl.resource_type == "instance" diff --git a/test/integration/roles/test_cs_resourcelimit/tasks/main.yml b/test/integration/roles/test_cs_resourcelimit/tasks/main.yml new file mode 100644 index 00000000000..f662bb939a8 --- /dev/null +++ b/test/integration/roles/test_cs_resourcelimit/tasks/main.yml @@ -0,0 +1,61 @@ +--- +- name: setup domain + cs_domain: path={{ cs_resource_prefix }}-domain + register: dom +- name: verify setup domain + assert: + that: + - dom|success + +- name: setup account + cs_account: + name: "{{ cs_resource_prefix }}_user" + username: "{{ cs_resource_prefix }}_username" + password: "{{ cs_resource_prefix }}_password" + last_name: "{{ cs_resource_prefix }}_last_name" + first_name: "{{ cs_resource_prefix }}_first_name" + email: "{{ cs_resource_prefix }}@example.com" + network_domain: "{{ cs_resource_prefix }}-local" + domain: "{{ cs_resource_prefix }}-domain" + register: acc +- name: verify setup account + assert: + that: + - acc|success + +- name: test failed unkonwn type + cs_resourcelimit: + 
type: unkonwn + limit: 20 + domain: "{{ cs_resource_prefix }}-domain" + register: rl + ignore_errors: yes +- name: verify test failed unkonwn type + assert: + that: + - rl|failed + +- name: test failed missing type + cs_resourcelimit: + register: rl + ignore_errors: yes +- name: verify test failed missing type + assert: + that: + - rl|failed + +- name: setup resource limits domain + cs_resourcelimit: + type: instance + limit: 20 + domain: "{{ cs_resource_prefix }}-domain" + register: rl +- name: verify setup resource limits domain + assert: + that: + - rl|success + - rl.domain == "{{ cs_resource_prefix }}-domain" + - rl.limit == 20 + +- include: instance.yml +- include: cpu.yml From d604c8c3a6978c84a3bee9c49934dfacb7a4208b Mon Sep 17 00:00:00 2001 From: Justin Phelps <linuturk@onitato.com> Date: Tue, 15 Mar 2016 16:27:33 -0500 Subject: [PATCH 0992/1113] Remove trailing whitespace from the galaxy init Jinja2 template. Default a description. --- lib/ansible/cli/galaxy.py | 1 + lib/ansible/galaxy/data/metadata_template.j2 | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 17f06409bb5..9aed9d742d8 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -251,6 +251,7 @@ class GalaxyCLI(CLI): inject = dict( author = 'your name', + description = 'your description', company = 'your company (optional)', license = 'license (GPLv2, CC-BY, etc)', issue_tracker_url = 'http://example.com/issue/tracker', diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2 index 1054c64bdfa..2b9228f39fd 100644 --- a/lib/ansible/galaxy/data/metadata_template.j2 +++ b/lib/ansible/galaxy/data/metadata_template.j2 @@ -1,12 +1,12 @@ galaxy_info: author: {{ author }} - description: {{description}} + description: {{ description }} company: {{ company }} - + # If the issue tracker for your role is not on github, uncomment the # next line and provide a value # issue_tracker_url: {{ issue_tracker_url }} - + # Some suggested licenses: # - BSD (default) # - MIT @@ -15,7 +15,7 @@ galaxy_info: # - Apache # - CC-BY license: {{ license }} - + min_ansible_version: {{ min_ansible_version }} # Optionally specify the branch Galaxy will use when accessing the GitHub @@ -25,7 +25,7 @@ galaxy_info: # branch will be accepted. Otherwise, in all cases, the repo's default branch # (usually master) will be used. #github_branch: - + # # Below are all platforms currently available. Just uncomment # the ones that apply to your role. If you don't see your @@ -38,9 +38,9 @@ galaxy_info: # - all {%- for version in versions %} # - {{ version }} - {%- endfor %} + {%- endfor -%} {%- endfor %} - + galaxy_tags: [] # List tags for your role here, one per line. A tag is # a keyword that describes and categorizes the role. 
From 6b1b4e03b0d006a2d402d6455c51a3e2da7996bf Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 15 Mar 2016 23:27:14 +0100 Subject: [PATCH 0993/1113] changelog: append cs_zone_facts --- CHANGELOG.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a7ac769684..3d79c59d70f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Ansible Changes By Release * cs_resourcelimit * cs_volume * cs_zone + * cs_zone_facts - windows * win_regmerge * win_timezone @@ -182,7 +183,7 @@ allowed in future versions: * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases * WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. -* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. +* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. ####Deprecated Modules (new ones in parens): @@ -952,7 +953,7 @@ New Modules: * apache2_module - cloud * digital_ocean_domain - * digital_ocean_sshkey + * digital_ocean_sshkey * ec2_asg *(configure autoscaling groups)* * ec2_metric_alarm * ec2_scaling_policy @@ -969,7 +970,7 @@ Other notable changes: * libvirt module now supports destroyed and paused as states * s3 module can specify metadata * security token additions to ec2 modules -* setup module code moved into module_utils/, facts now accessible by other modules +* setup module code moved into module_utils/, facts now accessible by other modules * synchronize module sets relative dirs based on inventory or role path * misc bugfixes and other parameters * the ec2_key module now has wait/wait_timeout parameters @@ -1018,7 +1019,7 @@ Major features/changes: * only_if, which is much older than when_foo and was deprecated, is similarly removed. * ssh connection plugin is now more efficient if you add 'pipelining=True' in ansible.cfg under [ssh_connection], see example.cfg * localhost/127.0.0.1 is not required to be in inventory if referenced, if not in inventory, it does not implicitly appear in the 'all' group. -* git module has new parameters (accept_hostkey, key_file, ssh_opts) to ease the usage of git and ssh protocols. +* git module has new parameters (accept_hostkey, key_file, ssh_opts) to ease the usage of git and ssh protocols. * when using accelerate mode, the daemon will now be restarted when specifying a different remote_user between plays. * added no_log: option for tasks. When used, no logging information will be sent to syslog during the module execution. * acl module now handles 'default' and allows for either shorthand entry or specific fields per entry section @@ -1027,7 +1028,7 @@ Major features/changes: * all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs. 
* Start of new integration test infrastructure (WIP, more details TBD) * if repoquery is unavailable, the yum module will automatically attempt to install yum-utils -* ansible-vault: a framework for encrypting your playbooks and variable files +* ansible-vault: a framework for encrypting your playbooks and variable files * added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays New modules: @@ -1172,7 +1173,7 @@ Plugins: * jail connection module (FreeBSD) * lxc connection module -* added inventory script for listing FreeBSD jails +* added inventory script for listing FreeBSD jails * added md5 as a Jinja2 filter: {{ path | md5 }} * added a fileglob filter that will return files matching a glob pattern. with_items: "/foo/pattern/*.txt | fileglob" * 'changed' filter returns whether a previous step was changed easier. when: registered_result | changed @@ -1187,7 +1188,7 @@ Misc changes (all module additions/fixes may not listed): * Added `ansible_env` to the list of facts returned by the setup module. * Added `state=touch` to the file module, which functions similarly to the command-line version of `touch`. * Added a -vvvv level, which will show SSH client debugging information in the event of a failure. -* Includes now support the more standard syntax, similar to that of role includes and dependencies. +* Includes now support the more standard syntax, similar to that of role includes and dependencies. * Changed the `user:` parameter on plays to `remote_user:` to prevent confusion with the module of the same name. Still backwards compatible on play parameters. * Added parameter to allow the fetch module to skip the md5 validation step ('validate_md5=false'). This is useful when fetching files that are actively being written to, such as live log files. * Inventory hosts are used in the order they appear in the inventory. From 7d5650219db04fd7af2cc7347dbc9c098931a1d2 Mon Sep 17 00:00:00 2001 From: Clark Boylan <clark.boylan@gmail.com> Date: Tue, 15 Mar 2016 16:42:06 -0700 Subject: [PATCH 0994/1113] Fix openstack inventory for multiple servers Fix openstack inventory for when we have multiple servers with the same name but different IDs. Instead of giving every server with the same name the details for the first server returned with that name add the individual servers as they are returned. This was a logic bug where in a loop over a list of servers we always added the first server in that list despite having more than one server. 
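[Editor's note: a minimal, self-contained sketch of the logic bug described in the commit message above, using hypothetical data; the real inventory script uses append_hostvars() and the hostvars/groups structures shown in the diff that follows.]

    # Indexing servers[0] inside the loop records the first server's details
    # for every same-named entry; iterating over `server` records each one.
    servers = [
        {'id': 'a1', 'name': 'web'},
        {'id': 'b2', 'name': 'web'},  # same name, different ID
    ]

    buggy = {s['id']: servers[0] for s in servers}  # both entries get 'a1' details
    fixed = {s['id']: s for s in servers}           # each entry keeps its own data

    assert buggy['b2']['id'] == 'a1'  # wrong details for the second server
    assert fixed['b2']['id'] == 'b2'  # correct details
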
--- contrib/inventory/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index cc1f6dbed67..e8687788505 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -149,7 +149,7 @@ def get_host_groups_from_cloud(inventory): else: for server in servers: append_hostvars( - hostvars, groups, server['id'], servers[0], + hostvars, groups, server['id'], server, namegroup=True) groups['_meta'] = {'hostvars': hostvars} return groups From fe278202f2117b1822807969f94e32a54a5db761 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 15 Mar 2016 20:31:40 -0700 Subject: [PATCH 0995/1113] Fix saving of tracebacks on module failure --- lib/ansible/plugins/action/__init__.py | 13 ++++++------- lib/ansible/plugins/action/synchronize.py | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 917e16dec38..392b6704060 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -506,13 +506,12 @@ class ActionBase(with_metaclass(ABCMeta, object)): except ValueError: # not valid json, lets try to capture error data = dict(failed=True, parsed=False) - if 'stderr' in res and res['stderr'].startswith(u'Traceback'): - data['exception'] = res['stderr'] - else: - data['msg'] = "MODULE FAILURE" - data['module_stdout'] = res.get('stdout', u'') - if 'stderr' in res: - data['module_stderr'] = res['stderr'] + data['msg'] = "MODULE FAILURE" + data['module_stdout'] = res.get('stdout', u'') + if 'stderr' in res: + data['module_stderr'] = res['stderr'] + if res['stderr'].startswith(u'Traceback'): + data['exception'] = res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 9b267844b81..c0845d97f2c 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -327,6 +327,6 @@ class ActionModule(ActionBase): if 'SyntaxError' in result.get('exception', result.get('msg', '')): # Emit a warning about using python3 because synchronize is # somewhat unique in running on localhost - result['traceback'] = result['msg'] + result['exception'] = result['msg'] result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this' return result From 696b68f07a498dc85d281debd55992a49c5bf04a Mon Sep 17 00:00:00 2001 From: Daniel Kempkens <daniel+git@kempkens.io> Date: Wed, 16 Mar 2016 10:49:21 +0100 Subject: [PATCH 0996/1113] Check return value of get_distribution() On none-Linux systems `get_distribution()` returns `None`, which fails in `fetch_url`, because the return value of `get_distribution()` is not checked before calling `lower()` on the result. 
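[Editor's note: a minimal sketch of the guard described in the commit message above; the helper name pick_error_message is illustrative only, the actual one-line change to fetch_url() appears in the diff below.]

    # Guard against get_distribution() returning None (e.g. on non-Linux
    # platforms) before calling .lower(), so the Red Hat-specific EPEL hint
    # is only added when the distribution is actually known.
    def pick_error_message(distribution, base_msg):
        if distribution is not None and distribution.lower() == 'redhat':
            return '%s. You can also install python-ssl from EPEL' % base_msg
        return base_msg

    assert pick_error_message(None, 'boom') == 'boom'
    assert 'EPEL' in pick_error_message('RedHat', 'boom')
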
--- lib/ansible/module_utils/urls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 51779dc4e13..dd9b76256ab 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -842,7 +842,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) except NoSSLError, e: distribution = get_distribution() - if distribution.lower() == 'redhat': + if distribution is not None and distribution.lower() == 'redhat': module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e)) else: module.fail_json(msg='%s' % str(e)) From 6efdaa6f4f3d6346a45c5c45260a9dc15e8e5ff8 Mon Sep 17 00:00:00 2001 From: karimb <karimboumedhel@gmail.com> Date: Wed, 16 Mar 2016 14:20:27 +0100 Subject: [PATCH 0997/1113] Include Ovirt in inventory list --- docsite/rst/intro_dynamic_inventory.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 71f64081763..7b212009cbe 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -290,7 +290,8 @@ In addition to Cobbler and EC2, inventory scripts are also available for:: Linode OpenShift OpenStack Nova - Red Hat's SpaceWalk + Ovirt + SpaceWalk Vagrant (not to be confused with the provisioner in vagrant, which is preferred) Zabbix From 2da4f04269d2c6321ed6ef55dedb122ed87ef37e Mon Sep 17 00:00:00 2001 From: jpic <jamespic@gmail.com> Date: Wed, 16 Mar 2016 18:13:07 +0100 Subject: [PATCH 0998/1113] Install coveralls, required by after_success --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ff98c4c34a2..83fd3673dfa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,7 +27,7 @@ addons: packages: - python2.4 install: - - pip install tox + - pip install tox coveralls script: - ./test/utils/run_tests.sh after_success: From ec419446b7b9f1c3b13a15bc47fd0ec077ba0466 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 10:19:57 -0700 Subject: [PATCH 0999/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a8841e68342..a98cd86f885 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a8841e6834231ed63f8df4bab9d5996d9a78f0bf +Subproject commit a98cd86f8853c1e1a1441d0b61a71204bdd67702 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 45bba8ec64e..f9b96b9a8ad 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 45bba8ec64ebf908dd2e480ed620ee98d5665e53 +Subproject commit f9b96b9a8add347679044dd9f2737a8721cdf7f3 From 0ca4be6fb5b5bf8833c025f0aa3f96cb7b0a010b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:00:56 -0700 Subject: [PATCH 1000/1113] Update submodule refs for docs fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a98cd86f885..345d9cbca86 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 
a98cd86f8853c1e1a1441d0b61a71204bdd67702 +Subproject commit 345d9cbca86a8202f3044261c84429c305bd78b8 From 0ad4ba5fe5926b6172f7139525e478c54639c954 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:03:00 -0700 Subject: [PATCH 1001/1113] Fix another docs link problem --- docsite/rst/intro_configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 419eeaf5f9d..87f43d1a91c 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -626,7 +626,7 @@ Additional paths can be provided separated by colon characters, in the same way Roles will be first searched for in the playbook directory. Should a role not be found, it will indicate all the possible paths that were searched. -.. _strategy_plugins: +.. _cfg_strategy_plugins: strategy_plugins ================== From b6d5c6888b162e017a59a32899a29d9e07c8dca2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:04:28 -0700 Subject: [PATCH 1002/1113] Update link target --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index d14c8412e38..7a5679ed63c 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -41,7 +41,7 @@ Each playbook is composed of one or more 'plays' in a list. The goal of a play is to map a group of hosts to some well defined roles, represented by things ansible calls tasks. At a basic level, a task is nothing more than a call -to an ansible module (see :doc:`Modules`). +to an ansible module (see :doc:`modules`). By composing a playbook of multiple 'plays', it is possible to orchestrate multi-machine deployments, running certain steps on all From de708f7199292ad6736c3b593b41642a80f248f7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 10:58:16 -0700 Subject: [PATCH 1003/1113] Document the issue with modules being created world-readable on the client in certain circumstances --- docsite/rst/become.rst | 66 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 63 insertions(+), 3 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 2698b226bb9..f7082d1d111 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -76,11 +76,71 @@ You cannot mix directives on the same object (become and sudo) though, Ansible w Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the new ones. -.. note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc). +Limitations +----------- -.. note:: Methods cannot be chained, you cannot use 'sudo /bin/su -' to become a user, you need to have privileges to run the command as that user in sudo or be able to su directly to it (the same for pbrun, pfexec or other supported methods). +Although privilege escalation is mostly intuitive, there are a few limitations +on how it works. Users should be aware of these to avoid surprises. + +Becoming an Unprivileged User +============================= + +Ansible has a limitation with regards to becoming an +unprivileged user that can be a security risk if users are not aware of it. 
+Ansible modules are executed on the remote machine by first substituting the +parameters into the module file, then copying the file to the remote machine, +and finally executing it there. If the module file is executed without using +become, when the become user is root, or when the connection to the remote +machine is made as root then the module file is created with permissions that +only allow reading by the user and root. + +If the become user is an unprivileged user and then Ansible has no choice but +to make the module file world readable as there's no other way for the user +Ansible connects as to save the file so that the user that we're becoming can +read it. + +If any of the parameters passed to the module are sensitive in nature then +those pieces of data are readable by reading the module file for the duration +of the Ansible module execution. Once the module is done executing Ansible +will delete the temporary file. If you trust the client machines then there's +no problem here. If you do not trust the client machines then this is +a potential danger. + +Ways to resolve this include: + +* Use :ref:`pipelining`. When pipelining is enabled, Ansible doesn't save the + module to a temporary file on the client. Instead it pipes the module to + the remote python interpreter's stdin. Pipelining does not work for + non-python modules. + +* Don't perform an action on the remote machine by becoming an unprivileged + user. Temporary files are protected by UNIX file permissions when you + become root or do not use become. + +Connection Plugin Support +========================= + +Privilege escalation methods must also be supported by the connection plugin +used. Most connection plugins will warn if they do not support become. Some +will just ignore it as they always run as root (jail, chroot, etc). + +Only one method may be enabled per host +======================================= + +Methods cannot be chained. You cannot use ``sudo /bin/su -`` to become a user, +you need to have privileges to run the command as that user in sudo or be able +to su directly to it (the same for pbrun, pfexec or other supported methods). + +Can't limit escalation to certain commands +========================================== + +Privilege escalation permissions have to be general. Ansible does not always +use a specific command to do something but runs modules (code) from +a temporary file name which changes every time. If you have '/sbin/service' +or '/bin/chmod' as the allowed commands this will fail with ansible as those +paths won't match with the temporary file that ansible creates to run the +module. -.. note:: Privilege escalation permissions have to be general, Ansible does not always use a specific command to do something but runs modules (code) from a temporary file name which changes every time. So if you have '/sbin/service' or '/bin/chmod' as the allowed commands this will fail with ansible. .. 
seealso:: From fee73100c4455305973f2fb8583ee2ea8f4e1dca Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:20:02 -0700 Subject: [PATCH 1004/1113] Move BUFSIZE to __init__ since it's common to many connection plugins --- lib/ansible/plugins/connection/__init__.py | 2 ++ lib/ansible/plugins/connection/chroot.py | 4 +--- lib/ansible/plugins/connection/docker.py | 4 +--- lib/ansible/plugins/connection/jail.py | 4 +--- lib/ansible/plugins/connection/libvirt_lxc.py | 4 +--- lib/ansible/plugins/connection/zone.py | 4 +--- 6 files changed, 7 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 8528a2e75d9..315293163b5 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -42,6 +42,8 @@ except ImportError: __all__ = ['ConnectionBase', 'ensure_connect'] +BUFSIZE = 65536 + def ensure_connect(func): @wraps(func) diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index 7918ac5602c..65c37a08417 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -28,7 +28,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError -from ansible.plugins.connection import ConnectionBase +from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.module_utils.basic import is_executable from ansible.utils.unicode import to_bytes @@ -38,8 +38,6 @@ except ImportError: from ansible.utils.display import Display display = Display() -BUFSIZE = 65536 - class Connection(ConnectionBase): ''' Local chroot based connections ''' diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index b1c499ed725..13c6d8dc90b 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -35,7 +35,7 @@ from distutils.version import LooseVersion import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound -from ansible.plugins.connection import ConnectionBase +from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.unicode import to_bytes try: @@ -44,8 +44,6 @@ except ImportError: from ansible.utils.display import Display display = Display() -BUFSIZE = 65536 - class Connection(ConnectionBase): ''' Local docker based connections ''' diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index d44213f439b..f5cd6d2ef1e 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -29,7 +29,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError -from ansible.plugins.connection import ConnectionBase +from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.unicode import to_bytes try: @@ -38,8 +38,6 @@ except ImportError: from ansible.utils.display import Display display = Display() -BUFSIZE = 65536 - class Connection(ConnectionBase): ''' Local BSD Jail based connections ''' diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index 9c4d6eac27c..2b92e9c9dc7 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -29,7 +29,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError -from 
ansible.plugins.connection import ConnectionBase +from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.unicode import to_bytes try: @@ -38,8 +38,6 @@ except ImportError: from ansible.utils.display import Display display = Display() -BUFSIZE = 65536 - class Connection(ConnectionBase): ''' Local lxc based connections ''' diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py index b65c80b73fb..2bd49a5249f 100644 --- a/lib/ansible/plugins/connection/zone.py +++ b/lib/ansible/plugins/connection/zone.py @@ -30,7 +30,7 @@ import traceback from ansible import constants as C from ansible.errors import AnsibleError -from ansible.plugins.connection import ConnectionBase +from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils import to_bytes try: @@ -39,8 +39,6 @@ except ImportError: from ansible.utils.display import Display display = Display() -BUFSIZE = 65536 - class Connection(ConnectionBase): ''' Local zone based connections ''' From de306eb5da7054a001e0b630db50732bf178e8a8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:22:50 -0700 Subject: [PATCH 1005/1113] Small cleanup to use class attribute directly instead of property for transport names --- lib/ansible/plugins/connection/local.py | 5 +---- lib/ansible/plugins/connection/paramiko_ssh.py | 5 +---- lib/ansible/plugins/connection/winrm.py | 6 +----- 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index 79ba8ab8bc1..9a956ca7359 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -43,10 +43,7 @@ except ImportError: class Connection(ConnectionBase): ''' Local based connections ''' - @property - def transport(self): - ''' used to identify this connection object ''' - return 'local' + transport = 'local' def _connect(self): ''' connect to the local host; nothing to do here ''' diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 557acc96900..150d168bf5f 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -126,10 +126,7 @@ SFTP_CONNECTION_CACHE = {} class Connection(ConnectionBase): ''' SSH based connections with Paramiko ''' - @property - def transport(self): - ''' used to identify this connection object from other classes ''' - return 'paramiko' + transport = 'paramiko' def _cache_key(self): return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user) diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py index 125e54cb9aa..3c079a1f2e2 100644 --- a/lib/ansible/plugins/connection/winrm.py +++ b/lib/ansible/plugins/connection/winrm.py @@ -61,6 +61,7 @@ except ImportError: class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' + transport = 'winrm' module_implementation_preferences = ('.ps1', '') become_methods = [] allow_executable = False @@ -77,11 +78,6 @@ class Connection(ConnectionBase): super(Connection, self).__init__(*args, **kwargs) - @property - def transport(self): - ''' used to identify this connection object from other classes ''' - return 'winrm' - def set_host_overrides(self, host): ''' Override WinRM-specific options from host variables. 
From 9763e76f6f2606442ff033155f7d15f1cc0b9f8f Mon Sep 17 00:00:00 2001 From: Alexey Kalinin <a.kalinin@f5.com> Date: Wed, 16 Mar 2016 11:24:20 -0700 Subject: [PATCH 1006/1113] add find_host_portgroup_by_name function to vmware utils --- lib/ansible/module_utils/vmware.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index a0999c05442..9950cdd937c 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -108,6 +108,14 @@ def find_vm_by_name(content, vm_name): return None +def find_host_portgroup_by_name(host, portgroup_name): + + for portgroup in host.config.network.portgroup: + if portgroup.spec.name == portgroup_name: + return portgroup + return None + + def vmware_argument_spec(): return dict( From a8acd7f93e2c97de269ad27050a715a5dc040273 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 11:39:51 -0700 Subject: [PATCH 1007/1113] Enable pipelining for jail connection plugin --- lib/ansible/plugins/connection/jail.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index f5cd6d2ef1e..844d94c51f0 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -45,7 +45,7 @@ class Connection(ConnectionBase): transport = 'jail' # Pipelining may work. Someone needs to test by setting this to True and # having pipelining=True in their ansible.cfg - has_pipelining = False + has_pipelining = True # Some become_methods may work in v2 (sudo works for other chroot-based # plugins while su seems to be failing). If some work, check chroot.py to # see how to disable just some methods. @@ -119,13 +119,6 @@ class Connection(ConnectionBase): ''' run a command on the jail ''' super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - # TODO: Check whether we can send the command to stdin via - # p.communicate(in_data) - # If we can, then we can change this plugin to has_pipelining=True and - # remove the error if in_data is given. 
- if in_data: - raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - p = self._buffered_exec_command(cmd) stdout, stderr = p.communicate(in_data) From 2095d1ab33551627e8fb098801705b633c4e0ee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guido=20G=C3=BCnther?= <agx@sigxcpu.org> Date: Wed, 16 Mar 2016 19:55:37 +0100 Subject: [PATCH 1008/1113] Integration tests for zypper repository --- test/integration/destructive.yml | 1 + .../test_zypper_repository/meta/main.yml | 2 + .../test_zypper_repository/tasks/main.yml | 22 ++++++++++ .../tasks/zypper_repository.yml | 43 +++++++++++++++++++ 4 files changed, 68 insertions(+) create mode 100644 test/integration/roles/test_zypper_repository/meta/main.yml create mode 100644 test/integration/roles/test_zypper_repository/tasks/main.yml create mode 100644 test/integration/roles/test_zypper_repository/tasks/zypper_repository.yml diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 3e8cca385e6..5dd590f8569 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -19,3 +19,4 @@ - { role: test_mysql_variables, tags: test_mysql_variables} - { role: test_docker, tags: test_docker, when: ansible_distribution != "Fedora" } - { role: test_zypper, tags: test_zypper} + - { role: test_zypper_repository, tags: test_zypper_repository} diff --git a/test/integration/roles/test_zypper_repository/meta/main.yml b/test/integration/roles/test_zypper_repository/meta/main.yml new file mode 100644 index 00000000000..07faa217762 --- /dev/null +++ b/test/integration/roles/test_zypper_repository/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/roles/test_zypper_repository/tasks/main.yml b/test/integration/roles/test_zypper_repository/tasks/main.yml new file mode 100644 index 00000000000..a805d6e25cd --- /dev/null +++ b/test/integration/roles/test_zypper_repository/tasks/main.yml @@ -0,0 +1,22 @@ +# test code for the zypper repository module +# +# (c) 2016, Guido Günther <agx@sigxcpu.org> + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +- include: 'zypper_repository.yml' + when: ansible_distribution in ['SLES', 'openSUSE'] + diff --git a/test/integration/roles/test_zypper_repository/tasks/zypper_repository.yml b/test/integration/roles/test_zypper_repository/tasks/zypper_repository.yml new file mode 100644 index 00000000000..d363e368a70 --- /dev/null +++ b/test/integration/roles/test_zypper_repository/tasks/zypper_repository.yml @@ -0,0 +1,43 @@ +- name: Delete + zypper_repository: + name: test + state: absent + register: zypper_result + +- name: Add repo + zypper_repository: + name: test + state: present + repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64 + register: zypper_result + +- debug: var=zypper_result + +- name: verify repo addition + assert: + that: + - "zypper_result.changed" + +- name: Add repo again + zypper_repository: + name: test + state: present + repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64 + register: zypper_result + +- name: verify no change on second install + assert: + that: + - "not zypper_result.changed" + +- name: Change repo URL + zypper_repository: + name: test + state: present + repo: http://download.videolan.org/pub/vlc/SuSE/Leap_42.1/ + register: zypper_result + +- name: Verify change on URL only change + assert: + that: + - "zypper_result.changed" From 1346c209b0daed77dfc79fec6d670ba13ff66f70 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 12:47:52 -0700 Subject: [PATCH 1009/1113] Add changes necessary for enabling pipelining for local connections --- lib/ansible/plugins/connection/local.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index 9a956ca7359..00ee3bc7071 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -19,9 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os +import select import shutil import subprocess -import select import fcntl import getpass @@ -44,6 +44,7 @@ class Connection(ConnectionBase): ''' Local based connections ''' transport = 'local' + has_pipelining = True def _connect(self): ''' connect to the local host; nothing to do here ''' @@ -65,8 +66,6 @@ class Connection(ConnectionBase): display.debug("in local.exec_command()") - if in_data: - raise AnsibleError("Internal Error: this module does not support optimized module pipelining") executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) @@ -112,7 +111,7 @@ class Connection(ConnectionBase): fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) display.debug("getting output with communicate()") - stdout, stderr = p.communicate() + stdout, stderr = p.communicate(in_data) display.debug("done communicating") display.debug("done with local.exec_command()") From 7346e3098a7c43c6cdc8cdf704d84d52287dd8d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 16 Mar 2016 13:09:45 -0700 Subject: [PATCH 1010/1113] Update extras submodule ref --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f9b96b9a8ad..f47b499bb99 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f9b96b9a8add347679044dd9f2737a8721cdf7f3 +Subproject commit 
f47b499bb9935b77b92f71eecfe981b9073184d6 From 61f05c2e8d4055da4745c9b060c09beec3bb33c5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 16 Mar 2016 13:09:27 -0700 Subject: [PATCH 1011/1113] strip keys recursively --- lib/ansible/vars/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 1405d8736dd..e881c47a5b4 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -83,6 +83,8 @@ def strip_internal_keys(dirty): for k in dirty.keys(): if isinstance(k, string_types) and k.startswith('_ansible_'): del clean[k] + elif isinstance(dirty[k], dict): + clean[k] = strip_internal_keys(dirty[k]) return clean class VariableManager: From d1e0719e4cd16fdd9d85dcf59a5c5c3741fa6583 Mon Sep 17 00:00:00 2001 From: qqo <qqo@users.noreply.github.com> Date: Wed, 16 Mar 2016 23:47:16 +0300 Subject: [PATCH 1012/1113] fix TypeError: coercing to Unicode: need string or buffer, AnsibleParserError found --- lib/ansible/utils/display.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index e94c8f0397b..220bbd381dd 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -108,7 +108,7 @@ class Display: """ Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks. - """ + """ # FIXME: this needs to be implemented #msg = utils.sanitize_output(msg) @@ -273,7 +273,7 @@ class Display: wrapped = textwrap.wrap(new_msg, self.columns) new_msg = u"\n".join(wrapped) + u"\n" else: - new_msg = u"ERROR! " + msg + new_msg = u"ERROR! %s" % msg if new_msg not in self._errors: self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 From 85e843baeeeb2f88dd17c7c987dcdbb992b29553 Mon Sep 17 00:00:00 2001 From: qqo <qqo@users.noreply.github.com> Date: Thu, 17 Mar 2016 00:50:47 +0300 Subject: [PATCH 1013/1113] Revert "fix TypeError: coercing to Unicode: need string or buffer, AnsibleParserError found" This reverts commit d1e0719e4cd16fdd9d85dcf59a5c5c3741fa6583. --- lib/ansible/utils/display.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 220bbd381dd..e94c8f0397b 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -108,7 +108,7 @@ class Display: """ Display a message to the user Note: msg *must* be a unicode string to prevent UnicodeError tracebacks. - """ + """ # FIXME: this needs to be implemented #msg = utils.sanitize_output(msg) @@ -273,7 +273,7 @@ class Display: wrapped = textwrap.wrap(new_msg, self.columns) new_msg = u"\n".join(wrapped) + u"\n" else: - new_msg = u"ERROR! %s" % msg + new_msg = u"ERROR! 
" + msg if new_msg not in self._errors: self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 From a7f89cac63d22c2b33f7ce7c6f0846c47e77d8a8 Mon Sep 17 00:00:00 2001 From: qqo <qqo@users.noreply.github.com> Date: Thu, 17 Mar 2016 00:56:47 +0300 Subject: [PATCH 1014/1113] fix TypeError: coercing to Unicode: need string or buffer, AnsibleParserError found --- lib/ansible/plugins/strategy/linear.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 0d4faf61493..750f1540516 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -29,6 +29,7 @@ from ansible.playbook.task import Task from ansible.plugins import action_loader from ansible.plugins.strategy import StrategyBase from ansible.template import Templar +from ansible.utils.unicode import to_unicode try: from __main__ import display @@ -330,7 +331,7 @@ class StrategyModule(StrategyBase): for host in included_file._hosts: self._tqm._failed_hosts[host.name] = True iterator.mark_host_failed(host) - display.error(e, wrap_text=False) + display.error(to_unicode(e), wrap_text=False) include_failure = True continue From ed457ad53b01ce9045f9f8db53c6816fdc8581da Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Thu, 17 Mar 2016 00:18:13 +0100 Subject: [PATCH 1015/1113] Fix the color when unreachable in default cb plugin The dedicated color for unreachable errors was not being used in the default cb plugin. This corrects the color. --- lib/ansible/plugins/callback/default.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 1bcb4b244db..bbce48025ad 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -103,9 +103,9 @@ class CallbackModule(CallbackBase): def v2_runner_on_unreachable(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) + self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE) else: - self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) + self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE) def v2_playbook_on_no_hosts_matched(self): self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP) From 0cb804f0c23b2e71c3156f584b9e103e8d0c624a Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 16 Mar 2016 16:59:44 -0700 Subject: [PATCH 1016/1113] fix lsb fact gathering was erroring out when rc !=0 also fixed redundant paths fixes #14965 --- lib/ansible/module_utils/facts.py | 33 +++++++++++++------------------ 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4612816f53d..335248f5361 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -631,22 +631,20 @@ class Facts(object): rc, out, err = self.module.run_command([lsb_path, "-a"]) if rc == 0: self.facts['lsb'] = {} - for line in out.split('\n'): - if len(line) < 1 or ':' not in line: - continue - value = line.split(':', 1)[1].strip() - if 'LSB Version:' in line: - self.facts['lsb']['release'] = value - elif 'Distributor ID:' in line: - self.facts['lsb']['id'] = value - elif 'Description:' in line: - self.facts['lsb']['description'] = value - elif 'Release:' in line: - self.facts['lsb']['release'] = value - elif 'Codename:' in line: - self.facts['lsb']['codename'] = value - if 'lsb' in self.facts and 'release' in self.facts['lsb']: - self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] + for line in out.split('\n'): + if len(line) < 1 or ':' not in line: + continue + value = line.split(':', 1)[1].strip() + if 'LSB Version:' in line: + self.facts['lsb']['release'] = value + elif 'Distributor ID:' in line: + self.facts['lsb']['id'] = value + elif 'Description:' in line: + self.facts['lsb']['description'] = value + elif 'Release:' in line: + self.facts['lsb']['release'] = value + elif 'Codename:' in line: + self.facts['lsb']['codename'] = value elif lsb_path is None and os.path.exists('/etc/lsb-release'): self.facts['lsb'] = {} for line in get_file_lines('/etc/lsb-release'): @@ -659,13 +657,10 @@ class Facts(object): self.facts['lsb']['description'] = value elif 'DISTRIB_CODENAME' in line: self.facts['lsb']['codename'] = value - else: - return self.facts if 'lsb' in self.facts and 'release' in self.facts['lsb']: self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0] - def get_selinux_facts(self): if not HAVE_SELINUX: self.facts['selinux'] = False From 6a1f97ad4231a96238255cf904a1f68e2da2ace8 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Thu, 10 Mar 2016 13:09:44 -0800 Subject: [PATCH 1017/1113] Add docs for mysql* modules connect_timeout. --- lib/ansible/utils/module_docs_fragments/mysql.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/utils/module_docs_fragments/mysql.py b/lib/ansible/utils/module_docs_fragments/mysql.py index 5dd1e04f93b..e57acab9b40 100644 --- a/lib/ansible/utils/module_docs_fragments/mysql.py +++ b/lib/ansible/utils/module_docs_fragments/mysql.py @@ -47,6 +47,12 @@ options: - The path to a Unix domain socket for local connections required: false default: null + connect_timeout: + description: + - The connection timeout when connecting to the MySQL server. 
+ required: false + default: 30 + version_added: "2.1" config_file: description: - Specify a config file from which user and password are to be read From 9a922fcf0d371953314db5e0c7d170767c8a6043 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Thu, 10 Mar 2016 13:33:08 -0800 Subject: [PATCH 1018/1113] Update test to use mysql connect_timeout option. --- test/integration/roles/test_mysql_variables/tasks/main.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/integration/roles/test_mysql_variables/tasks/main.yml b/test/integration/roles/test_mysql_variables/tasks/main.yml index 192472bf46f..26fc03c8679 100644 --- a/test/integration/roles/test_mysql_variables/tasks/main.yml +++ b/test/integration/roles/test_mysql_variables/tasks/main.yml @@ -194,11 +194,8 @@ #============================================================ # Verify mysql_variable fails with an incorrect login_host parameter # -- name: lower mysql connect timeout - ini_file: dest="{{ansible_env.HOME}}/.my.cnf" section=client option=connect_timeout value=5 - - name: query mysql_variable using incorrect login_host - mysql_variables: variable=wait_timeout login_host=12.0.0.9 + mysql_variables: variable=wait_timeout login_host=12.0.0.9 connect_timeout=5 register: result ignore_errors: true From 0e68c6d6fefd089b045a1f12c3de17dc64b4037c Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Mar 2016 02:01:16 -0400 Subject: [PATCH 1019/1113] Cleaning up use of literal_eval in basic.py AnsibleModule.safe_eval --- lib/ansible/module_utils/basic.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 26c17154b0e..fe9647265dd 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1263,11 +1263,7 @@ class AnsibleModule(object): return (str, None) return str try: - result = None - if not locals: - result = literal_eval(str) - else: - result = literal_eval(str, None, locals) + result = literal_eval(str) if include_exceptions: return (result, None) else: From 8eadc1d8ebd7ec90fac2ae386de69c9d783e0d95 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Mar 2016 02:01:47 -0400 Subject: [PATCH 1020/1113] Adding more unit tests for AnsibleModule things in basic.py --- .../basic/test__log_invocation.py | 58 +++++++++++++++++ .../module_utils/basic/test_safe_eval.py | 64 +++++++++++++++++++ test/units/module_utils/test_basic.py | 6 ++ 3 files changed, 128 insertions(+) create mode 100644 test/units/module_utils/basic/test__log_invocation.py create mode 100644 test/units/module_utils/basic/test_safe_eval.py diff --git a/test/units/module_utils/basic/test__log_invocation.py b/test/units/module_utils/basic/test__log_invocation.py new file mode 100644 index 00000000000..a08a2d84ca0 --- /dev/null +++ b/test/units/module_utils/basic/test__log_invocation.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# (c) 2016, James Cammarata <jimi@sngx.net> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division) +__metaclass__ = type + +import json + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import MagicMock + +class TestModuleUtilsBasic(unittest.TestCase): + + def test_module_utils_basic__log_invocation(self): + from ansible.module_utils import basic + + # test basic log invocation + basic.MODULE_COMPLEX_ARGS = json.dumps(dict(foo=False, bar=[1,2,3], bam="bam", baz=u'baz')) + am = basic.AnsibleModule( + argument_spec=dict( + foo = dict(default=True, type='bool'), + bar = dict(default=[], type='list'), + bam = dict(default="bam"), + baz = dict(default=u"baz"), + password = dict(default=True), + no_log = dict(default="you shouldn't see me", no_log=True), + ), + ) + + am.log = MagicMock() + am._log_invocation() + am.log.assert_called_with( + 'Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD ', + log_args={ + 'foo': 'False', + 'bar': '[1, 2, 3]', + 'bam': 'bam', + 'baz': 'baz', + 'password': 'NOT_LOGGING_PASSWORD', + 'no_log': 'NOT_LOGGING_PARAMETER', + }, + ) diff --git a/test/units/module_utils/basic/test_safe_eval.py b/test/units/module_utils/basic/test_safe_eval.py new file mode 100644 index 00000000000..32a2c4c27a3 --- /dev/null +++ b/test/units/module_utils/basic/test_safe_eval.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division) +__metaclass__ = type + +from ansible.compat.tests import unittest + + +class TestAnsibleModuleExitJson(unittest.TestCase): + + def test_module_utils_basic_safe_eval(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + # test some basic usage + # string (and with exceptions included), integer, bool + self.assertEqual(am.safe_eval("'a'"), 'a') + self.assertEqual(am.safe_eval("'a'", include_exceptions=True), ('a', None)) + self.assertEqual(am.safe_eval("1"), 1) + self.assertEqual(am.safe_eval("True"), True) + self.assertEqual(am.safe_eval("False"), False) + self.assertEqual(am.safe_eval("{}"), {}) + # not passing in a string to convert + self.assertEqual(am.safe_eval({'a':1}), {'a':1}) + self.assertEqual(am.safe_eval({'a':1}, include_exceptions=True), ({'a':1}, None)) + # invalid literal eval + self.assertEqual(am.safe_eval("a=1"), "a=1") + res = am.safe_eval("a=1", include_exceptions=True) + self.assertEqual(res[0], "a=1") + self.assertEqual(type(res[1]), SyntaxError) + self.assertEqual(am.safe_eval("a.foo()"), "a.foo()") + res = am.safe_eval("a.foo()", include_exceptions=True) + self.assertEqual(res[0], "a.foo()") + self.assertEqual(res[1], None) + self.assertEqual(am.safe_eval("import foo"), "import foo") + res = am.safe_eval("import foo", include_exceptions=True) + self.assertEqual(res[0], "import foo") + self.assertEqual(res[1], None) + self.assertEqual(am.safe_eval("__import__('foo')"), "__import__('foo')") + res = am.safe_eval("__import__('foo')", include_exceptions=True) + self.assertEqual(res[0], "__import__('foo')") + self.assertEqual(type(res[1]), ValueError) + diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 61914e67bc6..0a4ed0763d8 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -99,6 +99,12 @@ class TestModuleUtilsBasic(unittest.TestCase): mock_import.side_effect = _mock_import mod = builtins.__import__('ansible.module_utils.basic') + # FIXME: doesn't work yet + #@patch.object(builtins, 'bytes') + #def test_module_utils_basic_bytes(self, mock_bytes): + # mock_bytes.side_effect = NameError() + # from ansible.module_utils import basic + @patch.object(builtins, '__import__') @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_module_utils_basic_import_literal_eval(self, mock_import): From bc60f52db5a180cbac72405ae2d75f169fb66ca3 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 17 Mar 2016 07:19:21 -0700 Subject: [PATCH 1021/1113] avoid exceptiosn when not being called by CLI --- lib/ansible/plugins/callback/slack.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/slack.py b/lib/ansible/plugins/callback/slack.py index 575a1112c6a..9fc406fd34e 100644 --- a/lib/ansible/plugins/callback/slack.py +++ b/lib/ansible/plugins/callback/slack.py @@ -23,7 +23,10 @@ import json import os import uuid -from __main__ import cli +try: + from __main__ import cli +except ImportError: + cli = None from ansible.constants import mk_boolean from ansible.module_utils.urls import open_url From 84bbd2b4e3c4f98c05ca67ad6f4c3d4793ca7ea6 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 17 Mar 2016 07:25:37 -0700 Subject: [PATCH 1022/1113] corrected version added for ansible_version 
--- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index c03037ad909..bcdcaad3313 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -500,7 +500,7 @@ In this pattern however, you could also write a fact module as well, and may wis Ansible version ``````````````` -.. versionadded:: 2.0 +.. versionadded:: 1.8 To adapt playbook behavior to specific version of ansible, a variable ansible_version is available, with the following structure:: From f10de91e7f0cb2f48042c512850ae7991b2498fd Mon Sep 17 00:00:00 2001 From: Evgeni Golov <egolov@redhat.com> Date: Thu, 17 Mar 2016 18:06:42 +0100 Subject: [PATCH 1023/1113] moden apt can install local debs with dependencies just fine --- packaging/debian/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index 8051b2cfa4a..f170c13333f 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -17,3 +17,7 @@ To install the Ansible DEB package and resolve dependencies: sudo dpkg -i <package-file> sudo apt-get -fy install + +Or, if you are running Debian Stretch (or later) or Ubuntu Xenial (or later): + + sudo apt install <package-file> From 3fd74890d33cddd492676aff36d8b8f2824fa14e Mon Sep 17 00:00:00 2001 From: Evgeni Golov <egolov@redhat.com> Date: Thu, 17 Mar 2016 18:13:39 +0100 Subject: [PATCH 1024/1113] migrate to dh-python --- packaging/debian/control | 4 ++-- packaging/debian/rules | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packaging/debian/control b/packaging/debian/control index 13f0c5b42de..e5dc2d44970 100644 --- a/packaging/debian/control +++ b/packaging/debian/control @@ -3,12 +3,12 @@ Section: admin Priority: optional Standards-Version: 3.9.3 Maintainer: Ansible, Inc. <support@ansible.com> -Build-Depends: cdbs, debhelper (>= 5.0.0), asciidoc, python, python-support, python-setuptools +Build-Depends: cdbs, debhelper (>= 5.0.0), asciidoc, python, dh-python, python-setuptools Homepage: http://ansible.github.com/ Package: ansible Architecture: all -Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), python-setuptools, sshpass, ${misc:Depends} +Depends: python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), python-setuptools, sshpass, ${misc:Depends}, ${python:Depends} Description: A radically simple IT automation platform A radically simple IT automation platform that makes your applications and systems easier to deploy. 
Avoid writing scripts or custom code to deploy and diff --git a/packaging/debian/rules b/packaging/debian/rules index 1b4a1575c91..f5894b7a43b 100755 --- a/packaging/debian/rules +++ b/packaging/debian/rules @@ -1,6 +1,7 @@ #!/usr/bin/make -f # -- makefile -- +DEB_PYTHON2_MODULE_PACKAGES=ansible + include /usr/share/cdbs/1/rules/debhelper.mk -DEB_PYTHON_SYSTEM = pysupport include /usr/share/cdbs/1/class/python-distutils.mk From de2fe08d9f5db3509fa0f4c24c705f33f3a68340 Mon Sep 17 00:00:00 2001 From: Evgeni Golov <egolov@redhat.com> Date: Thu, 17 Mar 2016 18:28:23 +0100 Subject: [PATCH 1025/1113] update README.md --- packaging/debian/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index f170c13333f..22e584ff5c7 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -4,7 +4,7 @@ Ansible Debian Package To create an Ansible DEB package: sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools python-six sshpass - sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc devscripts docbook-xml xsltproc + sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro dh-python fakeroot asciidoc devscripts docbook-xml xsltproc libxml2-utils git clone git://github.com/ansible/ansible.git cd ansible make deb From 292be944c6259af4208f99fedc5c4bb418022da9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Thu, 17 Mar 2016 08:50:18 -0700 Subject: [PATCH 1026/1113] ensure we use delegated vars on delegation this prevents falling back to connection vars from the inventory_hostname when matching connection var is not in delegated host. --- lib/ansible/playbook/play_context.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 34b9affade6..a48f7395b49 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -359,12 +359,15 @@ class PlayContext(Base): for variable_name in variable_names: if attr in attrs_considered: continue - if isinstance(delegated_vars, dict) and variable_name in delegated_vars: - setattr(new_info, attr, delegated_vars[variable_name]) - attrs_considered.append(attr) + # if delegation task ONLY use delegated host vars, avoid delegated FOR host vars + if task.delegate_to is not None: + if isinstance(delegated_vars, dict) and variable_name in delegated_vars: + setattr(new_info, attr, delegated_vars[variable_name]) + attrs_considered.append(attr) elif variable_name in variables: setattr(new_info, attr, variables[variable_name]) attrs_considered.append(attr) + # no else, as no other vars should be considered # make sure we get port defaults if needed if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None: From 53756af546b91179b0a9cf23f5ec80c21afb22d8 Mon Sep 17 00:00:00 2001 From: Hagai Kariti <hkariti@gmail.com> Date: Sat, 9 Aug 2014 16:10:38 +0300 Subject: [PATCH 1027/1113] Added hostname_variable to ec2 inventory --- contrib/inventory/ec2.ini | 5 +++ contrib/inventory/ec2.py | 90 +++++++++++++++++++++++++++------------ 2 files changed, 67 insertions(+), 28 deletions(-) diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini index 25947a88f0f..7f4535c8a73 100644 --- a/contrib/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -29,6 +29,11 @@ regions_exclude = us-gov-west-1,cn-north-1 # in the event of a collision. 
destination_variable = public_dns_name +# This allows you to override the inventory_name with an ec2 variable, instead +# of using the destination_variable above. Addressing (aka ansible_ssh_host) +# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. +#hostname_variable = tag_Name + # For server inside a VPC, using DNS names may not make sense. When an instance # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 700b51a839e..e49b20245c5 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -236,7 +236,11 @@ class Ec2Inventory(object): # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - + if config.has_option('ec2', 'hostname_variable'): + self.hostname_variable = config.get('ec2', 'hostname_variable') + else: + self.hostname_variable = None + # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] @@ -628,32 +632,46 @@ class Ec2Inventory(object): # Skip instances we cannot address (e.g. private VPC subnet) return + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + + hostname = self.to_safe(hostname).lower() + # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(dest): + if self.pattern_include and not self.pattern_include.match(hostname): return # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(dest): + if self.pattern_exclude and self.pattern_exclude.match(hostname): return # Add to index - self.index[dest] = [region, instance.id] + self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: - self.inventory[instance.id] = [dest] + self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: - self.push(self.inventory, region, dest) + self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, dest) + self.push(self.inventory, instance.placement, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) @@ -662,28 +680,28 @@ class Ec2Inventory(object): # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, dest) + self.push(self.inventory, ami_id, hostname) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, dest) + self.push(self.inventory, type_name, 
hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, dest) + self.push(self.inventory, key_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, dest) + self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) @@ -692,7 +710,7 @@ class Ec2Inventory(object): try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, dest) + self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: @@ -712,7 +730,7 @@ class Ec2Inventory(object): key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) - self.push(self.inventory, key, dest) + self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: @@ -722,20 +740,21 @@ class Ec2Inventory(object): if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: - self.push(self.inventory, name, dest) + self.push(self.inventory, name, hostname) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', dest) + self.push(self.inventory, 'tag_none', hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', dest) + self.push(self.inventory, 'ec2', hostname) - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def add_rds_instance(self, instance, region): @@ -753,24 +772,38 @@ class Ec2Inventory(object): # Skip instances we cannot address (e.g. 
private VPC subnet) return + # Set the inventory name + hostname = None + if self.hostname_variable: + if self.hostname_variable.startswith('tag_'): + hostname = instance.tags.get(self.hostname_variable[4:], None) + else: + hostname = getattr(instance, self.hostname_variable) + + # If we can't get a nice hostname, use the destination address + if not hostname: + hostname = dest + + hostname = self.to_safe(hostname).lower() + # Add to index - self.index[dest] = [region, instance.id] + self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: - self.inventory[instance.id] = [dest] + self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: - self.push(self.inventory, region, dest) + self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, dest) + self.push(self.inventory, instance.availability_zone, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) @@ -779,14 +812,14 @@ class Ec2Inventory(object): # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, dest) + self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, dest) + self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) @@ -795,7 +828,7 @@ class Ec2Inventory(object): try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, dest) + self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) @@ -806,20 +839,21 @@ class Ec2Inventory(object): # Inventory: Group by engine if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances - self.push(self.inventory, 'rds', dest) + self.push(self.inventory, 'rds', hostname) - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) + self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest def 
add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as From 8a17da299f9b91129c85d3c96335a27384eab812 Mon Sep 17 00:00:00 2001 From: Hagai Kariti <hkariti@gmail.com> Date: Sat, 9 Aug 2014 16:13:45 +0300 Subject: [PATCH 1028/1113] Clean up lines with only whitespaces in ec2.py --- contrib/inventory/ec2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index e49b20245c5..e807884e345 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -240,7 +240,7 @@ class Ec2Inventory(object): self.hostname_variable = config.get('ec2', 'hostname_variable') else: self.hostname_variable = None - + # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] @@ -643,7 +643,7 @@ class Ec2Inventory(object): # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest - + hostname = self.to_safe(hostname).lower() # if we only want to include hosts that match a pattern, skip those that don't @@ -783,7 +783,7 @@ class Ec2Inventory(object): # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest - + hostname = self.to_safe(hostname).lower() # Add to index From 1ee6d489ab26d6a24c654b7549ebc1791d7a57ea Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Thu, 17 Mar 2016 14:19:28 -0400 Subject: [PATCH 1029/1113] Make sure tuples returned by _get_item() are handled correctly in formatting Fixes #14800 --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index bbce48025ad..99b1762a256 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -175,7 +175,7 @@ class CallbackModule(CallbackBase): else: msg += ": [%s]" % result._host.get_name() - msg += " => (item=%s)" % (self._get_item(result._result)) + msg += " => (item=%s)" % (self._get_item(result._result),) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) From ab693579a9fad0002165b73acb7b4bb2a70d2052 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 17 Mar 2016 14:21:16 -0700 Subject: [PATCH 1030/1113] Clarify the document about the order of matryoshka shells (nesting shells) --- lib/ansible/plugins/connection/__init__.py | 35 ++++++++++++---------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 315293163b5..d8844963683 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -173,8 +173,8 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): When a command is executed, it goes through multiple commands to get there. It looks approximately like this:: - HardCodedShell ConnectionCommand UsersLoginShell DEFAULT_EXECUTABLE BecomeCommand DEFAULT_EXECUTABLE Command - :HardCodedShell: Is optional. It is run locally to invoke the + [LocalShell] ConnectionCommand [UsersLoginShell (*)] DEFAULT_EXECUTABLE [(BecomeCommand DEFAULT_EXECUTABLE)] Command + :LocalShell: Is optional. It is run locally to invoke the ``Connection Command``. 
In most instances, the ``ConnectionCommand`` can be invoked directly instead. The ssh connection plugin which can have values that need expanding @@ -187,15 +187,17 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): ``ansible_ssh_host`` and so forth are fed to this piece of the command to connect to the correct host (Examples ``ssh``, ``chroot``) - :UsersLoginShell: This is the shell that the ``ansible_ssh_user`` has - configured as their login shell. In traditional UNIX parlance, - this is the last field of a user's ``/etc/passwd`` entry We do not - specifically try to run the ``UsersLoginShell`` when we connect. - Instead it is implicit in the actions that the - ``ConnectionCommand`` takes when it connects to a remote machine. - ``ansible_shell_type`` may be set to inform ansible of differences - in how the ``UsersLoginShell`` handles things like quoting if a - shell has different semantics than the Bourne shell. + :UsersLoginShell: This shell may or may not be created depending on + the ConnectionCommand used by the connection plugin. This is the + shell that the ``ansible_ssh_user`` has configured as their login + shell. In traditional UNIX parlance, this is the last field of + a user's ``/etc/passwd`` entry We do not specifically try to run + the ``UsersLoginShell`` when we connect. Instead it is implicit + in the actions that the ``ConnectionCommand`` takes when it + connects to a remote machine. ``ansible_shell_type`` may be set + to inform ansible of differences in how the ``UsersLoginShell`` + handles things like quoting if a shell has different semantics + than the Bourne shell. :DEFAULT_EXECUTABLE: This is the shell accessible via ``ansible.constants.DEFAULT_EXECUTABLE``. We explicitly invoke this shell so that we have predictable quoting rules at this @@ -207,10 +209,13 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): ``BecomeCommand``. After the ConnectionCommand, this is run by the ``UsersLoginShell``. After the ``BecomeCommand`` we specify that the ``DEFAULT_EXECUTABLE`` is being invoked directly. - :BecomeComand: Is the command that performs privilege escalation. - Setting this up is performed by the action plugin prior to running - ``exec_command``. So we just get passed :param:`cmd` which has the - BecomeCommand already added. (Examples: sudo, su) + :BecomeComand DEFAULTEXECUTABLE: Is the command that performs + privilege escalation. Setting this up is performed by the action + plugin prior to running ``exec_command``. So we just get passed + :param:`cmd` which has the BecomeCommand already added. + (Examples: sudo, su) If we have a BecomeCommand then we will + invoke a DEFAULT_EXECUTABLE shell inside of it so that we have + a consistent view of quoting. :Command: Is the command we're actually trying to run remotely. 
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file) """ From 4ac49ed4a8535c4b4e1aab75bc0d7787be5907dd Mon Sep 17 00:00:00 2001 From: Thomas Steinbach <thomas.steinbach@aikq.de> Date: Fri, 4 Dec 2015 02:33:08 +0100 Subject: [PATCH 1031/1113] use remote_user or become_user in docker connection --- lib/ansible/plugins/connection/docker.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 13c6d8dc90b..999b94c22df 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -121,8 +121,9 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' + exec_user = self._play_context.become_user if self._play_context.become else self._play_context.remote_user # -i is needed to keep stdin open which allows pipelining to work - local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd] + local_cmd = [self.docker_cmd, "exec", '-u', exec_user, '-i', self._play_context.remote_addr, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] @@ -169,7 +170,8 @@ class Connection(ConnectionBase): # Older docker doesn't have native support for copying files into # running containers, so we use docker exec to implement this executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c", + exec_user = self._play_context.become_user if self._play_context.become else self._play_context.remote_user + args = [self.docker_cmd, "exec", "-u", exec_user, "-i", self._play_context.remote_addr, executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)] args = [to_bytes(i, errors='strict') for i in args] with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: From 14dfad730e6356256950efbafd03e57f680499c7 Mon Sep 17 00:00:00 2001 From: Thomas Steinbach <thomas.steinbach@aikq.de> Date: Sun, 20 Dec 2015 18:18:29 +0100 Subject: [PATCH 1032/1113] use just 'remote_user' as user for the docker connection --- lib/ansible/plugins/connection/docker.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 999b94c22df..657b8b65357 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -121,9 +121,8 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - exec_user = self._play_context.become_user if self._play_context.become else self._play_context.remote_user # -i is needed to keep stdin open which allows pipelining to work - local_cmd = [self.docker_cmd, "exec", '-u', exec_user, '-i', self._play_context.remote_addr, executable, '-c', cmd] + local_cmd = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, '-i', self._play_context.remote_addr, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] @@ -170,8 +169,7 @@ class Connection(ConnectionBase): # Older 
docker doesn't have native support for copying files into # running containers, so we use docker exec to implement this executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - exec_user = self._play_context.become_user if self._play_context.become else self._play_context.remote_user - args = [self.docker_cmd, "exec", "-u", exec_user, "-i", self._play_context.remote_addr, executable, "-c", + args = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, "-i", self._play_context.remote_addr, executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)] args = [to_bytes(i, errors='strict') for i in args] with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: From db61e9be0cb4f9840156877dac455fb46809c5d5 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 11 Mar 2016 20:38:38 -0500 Subject: [PATCH 1033/1113] add ansible_executable inventory var also handle the overrides appropriately also new executable to set shell type --- docsite/rst/intro_configuration.rst | 2 ++ docsite/rst/intro_inventory.rst | 11 +++++++++-- lib/ansible/playbook/play_context.py | 10 +++++++++- lib/ansible/plugins/action/__init__.py | 6 ++++-- lib/ansible/plugins/connection/__init__.py | 4 ++-- test/units/plugins/action/test_action.py | 6 +++--- 6 files changed, 29 insertions(+), 10 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 87f43d1a91c..64070e32007 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -289,6 +289,8 @@ This indicates the command to use to spawn a shell under a sudo environment. Us executable = /bin/bash +Starting in version 2.1 this can be overriden by the inventory var ``ansible_executable``. + .. _filter_plugins: filter_plugins diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index b52183b3845..f8b303f597d 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -231,8 +231,7 @@ SSH connection:: ansible_ssh_extra_args This setting is always appended to the default ssh command line. ansible_ssh_pipelining - Determines whether or not to use SSH pipelining. This can override the - ``pipelining`` setting in ``ansible.cfg``. + Determines whether or not to use SSH pipelining. This can override the ``pipelining`` setting in ``ansible.cfg``. Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for further details):: @@ -261,6 +260,14 @@ Remote host environment parameters:: Works for anything such as ruby or perl and works just like ansible_python_interpreter. This replaces shebang of modules which will run on that host. +.. versionadded:: 2.1 + +:: + + ansible_executable + This sets the shell the ansible controller will use on the target machine, overrides ``executable`` in ``ansible.cfg`` which defaults to '/bin/sh'. + You should really only change it if is not possible to use '/bin/sh' (i.e. it is not in the list of allowed shells for your users). 
+ Examples from a host file:: some_host ansible_port=2222 ansible_user=manager diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index a48f7395b49..de6b8170550 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -78,6 +78,7 @@ MAGIC_VARIABLE_MAPPING = dict( su_pass = ('ansible_su_password', 'ansible_su_pass'), su_exe = ('ansible_su_exe',), su_flags = ('ansible_su_flags',), + executable = ('ansible_executable',), ) SU_PROMPT_LOCALIZATIONS = [ @@ -163,6 +164,7 @@ class PlayContext(Base): _accelerate = FieldAttribute(isa='bool', default=False) _accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True) _accelerate_port = FieldAttribute(isa='int', default=C.ACCELERATE_PORT, always_post_validate=True) + _executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE) # privilege escalation fields _become = FieldAttribute(isa='bool') @@ -354,6 +356,12 @@ class PlayContext(Base): else: delegated_vars = dict() + # setup shell + for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'): + if exe_var in variables: + setattr(new_info, 'executable', variables.get(exe_var)) + + attrs_considered = [] for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING): for variable_name in variable_names: @@ -417,7 +425,7 @@ class PlayContext(Base): self.prompt = None if executable is None: - executable = C.DEFAULT_EXECUTABLE + executable = self.executable if self.become: diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 392b6704060..8666329ffff 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -521,7 +521,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): display.debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data - def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'): + def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='replace'): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to @@ -548,7 +548,9 @@ class ActionBase(with_metaclass(ABCMeta, object)): display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) - if executable is not None and self._connection.allow_executable: + if self._connection.allow_executable: + if executable is None: + executable = self._play_context.executable cmd = executable + ' -c ' + pipes.quote(cmd) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index d8844963683..958a3f8f0ae 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -1,4 +1,4 @@ -# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> + # (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible @@ -89,7 +89,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): shell_type = getattr(self, '_shell_type') else: shell_type = 'sh' - shell_filename = os.path.basename(C.DEFAULT_EXECUTABLE) + shell_filename = os.path.basename(self.play_context.executable) for shell in shell_loader.all(): if shell_filename in shell.COMPATIBLE_SHELLS: shell_type = 
shell.SHELL_FAMILY diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index ea44e315642..4bb151f090f 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -617,8 +617,8 @@ class TestActionBase(unittest.TestCase): play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with("ECHO", executable='/bin/sh') + action_base._low_level_execute_command('ECHO', sudoable=True, executable='/bin/csh') + play_context.make_become_cmd.assert_called_once_with("ECHO", executable='/bin/csh') play_context.make_become_cmd.reset_mock() @@ -627,6 +627,6 @@ class TestActionBase(unittest.TestCase): try: play_context.remote_user = 'root' action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with("ECHO SAME", executable='/bin/sh') + play_context.make_become_cmd.assert_called_once_with("ECHO SAME", executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 0cdbc8b48da32ad706f89ffb58d09a27b8dd8965 Mon Sep 17 00:00:00 2001 From: Evgeni Golov <egolov@redhat.com> Date: Fri, 18 Mar 2016 11:07:23 +0100 Subject: [PATCH 1034/1113] backwards compat for python-support on old Debian/Ubuntu releases --- packaging/debian/README.md | 7 ++++++- packaging/debian/control | 2 +- packaging/debian/rules | 3 +++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index 22e584ff5c7..1561d854f8d 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -4,11 +4,16 @@ Ansible Debian Package To create an Ansible DEB package: sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools python-six sshpass - sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro dh-python fakeroot asciidoc devscripts docbook-xml xsltproc libxml2-utils + sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro fakeroot asciidoc devscripts docbook-xml xsltproc libxml2-utils + sudo apt-get install dh-python git clone git://github.com/ansible/ansible.git cd ansible make deb +On older releases that do not have `dh-python` (like Ubuntu 12.04), install `python-support` instead: + + sudo apt-get install python-support + The debian package file will be placed in the `../` directory. This can then be added to an APT repository or installed with `dpkg -i <package-file>`. Note that `dpkg -i` does not resolve dependencies. diff --git a/packaging/debian/control b/packaging/debian/control index e5dc2d44970..c6b699d798b 100644 --- a/packaging/debian/control +++ b/packaging/debian/control @@ -3,7 +3,7 @@ Section: admin Priority: optional Standards-Version: 3.9.3 Maintainer: Ansible, Inc. 
<support@ansible.com> -Build-Depends: cdbs, debhelper (>= 5.0.0), asciidoc, python, dh-python, python-setuptools +Build-Depends: cdbs, debhelper (>= 5.0.0), asciidoc, python, dh-python | python-support, python-setuptools Homepage: http://ansible.github.com/ Package: ansible diff --git a/packaging/debian/rules b/packaging/debian/rules index f5894b7a43b..3ebffe5c838 100755 --- a/packaging/debian/rules +++ b/packaging/debian/rules @@ -2,6 +2,9 @@ # -- makefile -- DEB_PYTHON2_MODULE_PACKAGES=ansible +ifneq ($(shell dpkg-query -f '$${Version}' -W python-support 2>/dev/null),) +DEB_PYTHON_SYSTEM=pysupport +endif include /usr/share/cdbs/1/rules/debhelper.mk include /usr/share/cdbs/1/class/python-distutils.mk From 2ba4428424a97716e107ae36d79ef332439c36d1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 05:52:53 -0700 Subject: [PATCH 1035/1113] Catch ValueError as well because of El Capitan provoking a bug in python2's subprocess Fixes #14895 --- lib/ansible/parsing/vault/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 8ea80d1b07a..8635da52bff 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -275,8 +275,12 @@ class VaultEditor: try: r = call(['shred', tmp_path]) - except OSError: + except (OSError, ValueError): # shred is not available on this system, or some other error occured. + # ValueError caught because OS X El Capitan is raising an + # exception big enough to hit a limit in python2-2.7.11 and below. + # Symptom is ValueError: insecure pickle when shred is not + # installed there. r = 1 if r != 0: From e25caebe7a6cd5a8f31451f943fc6849b3dd847b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 06:40:04 -0700 Subject: [PATCH 1036/1113] Cleanups to docs and rename of inv var --- docsite/rst/intro_configuration.rst | 2 +- docsite/rst/intro_inventory.rst | 8 ++++---- lib/ansible/playbook/play_context.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 64070e32007..2d12825ea02 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -289,7 +289,7 @@ This indicates the command to use to spawn a shell under a sudo environment. Us executable = /bin/bash -Starting in version 2.1 this can be overriden by the inventory var ``ansible_executable``. +Starting in version 2.1 this can be overriden by the inventory var ``ansible_shell_executable``. .. _filter_plugins: diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index f8b303f597d..bf103f28472 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -247,7 +247,7 @@ Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for furthe Remote host environment parameters:: ansible_shell_type - The shell type of the target system. You should not use this setting unless you have set the 'executable' to a non sh compatible shell. + The shell type of the target system. You should not use this setting unless you have set the 'ansible_shell_executable' to a non sh compatible shell. By default commands are formatted using 'sh'-style syntax. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. 
ansible_python_interpreter @@ -264,9 +264,9 @@ Remote host environment parameters:: :: - ansible_executable - This sets the shell the ansible controller will use on the target machine, overrides ``executable`` in ``ansible.cfg`` which defaults to '/bin/sh'. - You should really only change it if is not possible to use '/bin/sh' (i.e. it is not in the list of allowed shells for your users). + ansible_shell_executable + This sets the shell the ansible controller will use on the target machine, overrides ``executable`` in ``ansible.cfg`` which defaults to ``/bin/sh``. + You should really only change it if is not possible to use ``/bin/sh`` (i.e. ``/bin/sh`` is not installed on the target machine.). Examples from a host file:: diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index de6b8170550..dce14c9b7fb 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -78,7 +78,7 @@ MAGIC_VARIABLE_MAPPING = dict( su_pass = ('ansible_su_password', 'ansible_su_pass'), su_exe = ('ansible_su_exe',), su_flags = ('ansible_su_flags',), - executable = ('ansible_executable',), + executable = ('ansible_shell_executable',), ) SU_PROMPT_LOCALIZATIONS = [ From b809d23863177a0c2842c313eb89adee5d36862f Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Mar 2016 07:03:54 -0700 Subject: [PATCH 1037/1113] fixed typo --- lib/ansible/plugins/connection/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 958a3f8f0ae..d1820a2ef38 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -89,7 +89,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): shell_type = getattr(self, '_shell_type') else: shell_type = 'sh' - shell_filename = os.path.basename(self.play_context.executable) + shell_filename = os.path.basename(self._play_context.executable) for shell in shell_loader.all(): if shell_filename in shell.COMPATIBLE_SHELLS: shell_type = shell.SHELL_FAMILY From 3bfb556cb9aa1f4ce0af807796c119d6aa3d113f Mon Sep 17 00:00:00 2001 From: SUNTRUP <BS085X@att.com> Date: Thu, 17 Mar 2016 10:23:08 -0500 Subject: [PATCH 1038/1113] Clarify developing modules documentation This resolves issue #15022. When doing the module tutorial, it might be necessary for the user to read through the documentation for installing from source to avoid dependency errors and such. --- docsite/rst/developing_modules.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 741ec5231bd..14bb55e3385 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -71,6 +71,9 @@ There's a useful test script in the source checkout for ansible:: source ansible/hacking/env-setup chmod +x ansible/hacking/test-module +For instructions on setting up ansible from source, please see +:doc:`intro_installation`. 
+ Let's run the script you just wrote with that:: ansible/hacking/test-module -m ./timetest.py From d43fc631dd22372407f55265982d60632bad73a2 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Mar 2016 07:25:29 -0700 Subject: [PATCH 1039/1113] mount facts now include network mounts (nfs) --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 335248f5361..203a72996d1 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1084,8 +1084,8 @@ class LinuxHardware(Hardware): self.facts['mounts'] = [] mtab = get_file_content('/etc/mtab', '') for line in mtab.split('\n'): - if line.startswith('/'): - fields = line.rstrip('\n').split() + fields = line.rstrip('\n').split() + if fields[0].startswith('/') or ':/' in fields[0]: if(fields[2] != 'none'): size_total, size_available = self._get_mount_size_facts(fields[1]) if fields[0] in uuids: From bdf90d20dd463c9beea3ab2d791cec8e757e4e1b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 07:46:01 -0700 Subject: [PATCH 1040/1113] Add :Z to mount the volume. This is a docker-1.7+ option that makes the mount properly relabel for selinux --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index d6476f862b5..eef7f1f60ed 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -10,7 +10,7 @@ if [ "${TARGET}" = "sanity" ]; then else set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" - docker run -d --volume="${PWD}:/root/ansible" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} + docker run -d --volume="${PWD}:/root/ansible:Z" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) From 95495a6b389750310cb6b15edfc0c40fa38c6898 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Mar 2016 08:34:39 -0700 Subject: [PATCH 1041/1113] fixed typo fixes #15036 --- docs/man/man1/ansible-playbook.1.asciidoc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 5a6ec659ff6..1ffbaaf52a8 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -165,7 +165,7 @@ Add the specified arguments to any ssh command-line. *-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: -Sudo to 'SUDO_USERNAME' deafult is root. (deprecated, use become). +Sudo to 'SUDO_USERNAME' default is root. (deprecated, use become). 
*--skip-tags=*'SKIP_TAGS':: From f82639e4a242a23b5bc786048467c21a8a3ddaad Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen <ams@2ndQuadrant.com> Date: Fri, 18 Mar 2016 21:08:41 +0530 Subject: [PATCH 1042/1113] Fix the other typo from #15036 --- docs/man/man1/ansible-playbook.1.asciidoc.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index 1ffbaaf52a8..d34ddbc2dac 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -101,7 +101,7 @@ Alternatively, you can use a comma-separated list of hosts or a single host with *-l* 'SUBSET', *--limit=*'SUBSET':: Further limits the selected host/group patterns. -You can prefix it with '~' to indicate that the pattern in a regex. +You can prefix it with '~' to indicate that the pattern is a regex. *--list-hosts*:: From 6578e63e631fe79dc8fef43e90e7642e53385641 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Fri, 18 Mar 2016 08:50:07 -0700 Subject: [PATCH 1043/1113] improve test_test_infra debug messaging, rc check --- test/integration/Makefile | 3 ++- test/integration/test_test_infra.yml | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 331022ce516..b02592eaf2c 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -26,7 +26,8 @@ EUID := $(shell id -u -r) all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection test_gathering_facts test_test_infra: - [ "$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) | fgrep works | xargs)" = "msg: fail works (True) msg: assert works (True)" ] + # ensure fail/assert work and can stop execution with non-zero exit code + PB_OUT=$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS)) ; APB_RC=$$? 
; echo "$$PB_OUT" ; echo "rc was $$APB_RC (must be non-zero)" ; [ $$APB_RC -ne 0 ] setup: rm -rf $(TEST_DIR) diff --git a/test/integration/test_test_infra.yml b/test/integration/test_test_infra.yml index b78e36d2c92..48028ad8a7e 100644 --- a/test/integration/test_test_infra.yml +++ b/test/integration/test_test_infra.yml @@ -3,14 +3,16 @@ tags: - always tasks: - - fail: + - name: ensure fail action produces a failing result + fail: ignore_errors: yes register: fail_out - debug: msg: fail works ({{ fail_out.failed }}) - - assert: + - name: ensure assert produces a failing result + assert: that: false ignore_errors: yes register: assert_out @@ -18,6 +20,7 @@ - debug: msg: assert works ({{ assert_out.failed }}) - - fail: - msg: fail actually failed + - name: ensure fail action stops execution + fail: + msg: fail actually failed (this is expected) From 287fd29db942453dd1c84fa665b4a7a8a57443e4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 08:51:17 -0700 Subject: [PATCH 1044/1113] Use better rst markup (main change definition lists instead of code-blocks for config file options --- docsite/rst/intro_inventory.rst | 109 ++++++++++++++++---------------- 1 file changed, 55 insertions(+), 54 deletions(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index bf103f28472..d413ea64c68 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -200,73 +200,74 @@ List of Behavioral Inventory Parameters As alluded to above, setting the following variables controls how ansible interacts with remote hosts. -Host connection:: +Host connection: - ansible_connection - Connection type to the host. Candidates are local, smart, ssh or paramiko. The default is smart. +ansible_connection + Connection type to the host. This can be the name of any of ansible's connection plugins. Common connection types are local, smart, ssh or paramiko. The default is smart. .. include:: ansible_ssh_changes_note.rst -SSH connection:: +SSH connection: - ansible_host - The name of the host to connect to, if different from the alias you wish to give to it. - ansible_port - The ssh port number, if not 22 - ansible_user - The default ssh user name to use. - ansible_ssh_pass - The ssh password to use (this is insecure, we strongly recommend using --ask-pass or SSH keys) - ansible_ssh_private_key_file - Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent. - ansible_ssh_common_args - This setting is always appended to the default command line for - sftp, scp, and ssh. Useful to configure a ``ProxyCommand`` for a - certain host (or group). - ansible_sftp_extra_args - This setting is always appended to the default sftp command line. - ansible_scp_extra_args - This setting is always appended to the default scp command line. - ansible_ssh_extra_args - This setting is always appended to the default ssh command line. - ansible_ssh_pipelining - Determines whether or not to use SSH pipelining. This can override the ``pipelining`` setting in ``ansible.cfg``. +ansible_host + The name of the host to connect to, if different from the alias you wish to give to it. +ansible_port + The ssh port number, if not 22 +ansible_user + The default ssh user name to use. +ansible_ssh_pass + The ssh password to use (this is insecure, we strongly recommend using :option:`--ask-pass` or SSH keys) +ansible_ssh_private_key_file + Private key file used by ssh. 
Useful if using multiple keys and you don't want to use SSH agent. +ansible_ssh_common_args + This setting is always appended to the default command line for :command:`sftp`, :command:`scp`, + and :command:`ssh`. Useful to configure a ``ProxyCommand`` for a certain host (or + group). +ansible_sftp_extra_args + This setting is always appended to the default :command:`sftp` command line. +ansible_scp_extra_args + This setting is always appended to the default :command:`scp` command line. +ansible_ssh_extra_args + This setting is always appended to the default :command:`ssh` command line. +ansible_ssh_pipelining + Determines whether or not to use SSH pipelining. This can override the ``pipelining`` setting in :file:`ansible.cfg`. -Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for further details):: +Privilege escalation (see :doc:`Ansible Privilege Escalation<become>` for further details): - ansible_become - Equivalent to ansible_sudo or ansible_su, allows to force privilege escalation - ansible_become_method - Allows to set privilege escalation method - ansible_become_user - Equivalent to ansible_sudo_user or ansible_su_user, allows to set the user you become through privilege escalation - ansible_become_pass - Equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password +ansible_become + Equivalent to ``ansible_sudo`` or ``ansible_su``, allows to force privilege escalation +ansible_become_method + Allows to set privilege escalation method +ansible_become_user + Equivalent to ``ansible_sudo_user`` or ``ansible_su_user``, allows to set the user you become through privilege escalation +ansible_become_pass + Equivalent to ``ansible_sudo_pass`` or ``ansible_su_pass``, allows you to set the privilege escalation password -Remote host environment parameters:: +Remote host environment parameters: - ansible_shell_type - The shell type of the target system. You should not use this setting unless you have set the 'ansible_shell_executable' to a non sh compatible shell. - By default commands are formatted using 'sh'-style syntax. - Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. - ansible_python_interpreter - The target host python path. This is useful for systems with more - than one Python or not located at "/usr/bin/python" such as *BSD, or where /usr/bin/python - is not a 2.X series Python. We do not use the "/usr/bin/env" mechanism as that requires the remote user's - path to be set right and also assumes the "python" executable is named python, where the executable might - be named something like "python26". - ansible_*_interpreter - Works for anything such as ruby or perl and works just like ansible_python_interpreter. - This replaces shebang of modules which will run on that host. +ansible_shell_type + The shell type of the target system. You should not use this setting unless you have set the ``ansible_shell_executable`` to a non-Bourne (sh) compatible shell. + By default commands are formatted using ``sh``-style syntax. + Setting this to ``csh`` or ``fish`` will cause commands executed on target systems to follow those shell's syntax instead. +ansible_python_interpreter + The target host python path. This is useful for systems with more + than one Python or not located at :command:`/usr/bin/python` such as \*BSD, or where :command:`/usr/bin/python` + is not a 2.X series Python. 
We do not use the :command:`/usr/bin/env` mechanism as that requires the remote user's + path to be set right and also assumes the :program:`python` executable is named python, where the executable might + be named something like :program:`python2.6`. +ansible_*_interpreter + Works for anything such as ruby or perl and works just like ``ansible_python_interpreter``. + This replaces shebang of modules which will run on that host. .. versionadded:: 2.1 -:: - - ansible_shell_executable - This sets the shell the ansible controller will use on the target machine, overrides ``executable`` in ``ansible.cfg`` which defaults to ``/bin/sh``. - You should really only change it if is not possible to use ``/bin/sh`` (i.e. ``/bin/sh`` is not installed on the target machine.). +ansible_shell_executable + This sets the shell the ansible controller will use on the target machine, + overrides ``executable`` in :file:`ansible.cfg` which defaults to + :command:`/bin/sh`. You should really only change it if is not possible + to use :command:`/bin/sh` (i.e. :command:`/bin/sh` is not installed on the target + machine.). Examples from a host file:: From 08394df8d0774de66e7c816e2578f6e8222dcca0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 09:06:05 -0700 Subject: [PATCH 1045/1113] Update submodule links for docs fixes --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 345d9cbca86..eebd6c93ba0 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 345d9cbca86a8202f3044261c84429c305bd78b8 +Subproject commit eebd6c93ba04ffac98e710459edd990b132f7820 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f47b499bb99..33e1d9d1cb4 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f47b499bb9935b77b92f71eecfe981b9073184d6 +Subproject commit 33e1d9d1cb41a4dacaf9c649d6889ae6ac0afcbf From 8bdf0d4746c0c88ebc8af231afab40954811e613 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 09:09:31 -0700 Subject: [PATCH 1046/1113] And another doc fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index eebd6c93ba0..22a9a15a5b0 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit eebd6c93ba04ffac98e710459edd990b132f7820 +Subproject commit 22a9a15a5b08a037aa94087aba33839907881bf1 From 60c943997bced134ffa7c4440a4021e77cd29540 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Fri, 18 Mar 2016 09:16:21 -0700 Subject: [PATCH 1047/1113] More doc updates regarding ansible_shell_executable --- docsite/rst/intro_inventory.rst | 2 +- lib/ansible/plugins/connection/__init__.py | 27 +++++++++++----------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index d413ea64c68..6cf7081f3aa 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -267,7 +267,7 @@ ansible_shell_executable overrides ``executable`` in :file:`ansible.cfg` which defaults to :command:`/bin/sh`. You should really only change it if is not possible to use :command:`/bin/sh` (i.e. :command:`/bin/sh` is not installed on the target - machine.). 
+ machine or cannot be run from sudo.). Examples from a host file:: diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index d1820a2ef38..86f2a3b550d 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -173,7 +173,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): When a command is executed, it goes through multiple commands to get there. It looks approximately like this:: - [LocalShell] ConnectionCommand [UsersLoginShell (*)] DEFAULT_EXECUTABLE [(BecomeCommand DEFAULT_EXECUTABLE)] Command + [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command :LocalShell: Is optional. It is run locally to invoke the ``Connection Command``. In most instances, the ``ConnectionCommand`` can be invoked directly instead. The ssh @@ -198,24 +198,25 @@ class ConnectionBase(with_metaclass(ABCMeta, object)): to inform ansible of differences in how the ``UsersLoginShell`` handles things like quoting if a shell has different semantics than the Bourne shell. - :DEFAULT_EXECUTABLE: This is the shell accessible via - ``ansible.constants.DEFAULT_EXECUTABLE``. We explicitly invoke - this shell so that we have predictable quoting rules at this - point. The ``DEFAULT_EXECUTABLE`` is only settable by the user - because some sudo setups may only allow invoking a specific Bourne - shell. (For instance, ``/bin/bash`` may be allowed but - ``/bin/sh``, our default, may not). We invoke this twice, once - after the ``ConnectionCommand`` and once after the + :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var + ``ansible_shell_executable`` or via + ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set. + We explicitly invoke this shell so that we have predictable + quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only + settable by the user because some sudo setups may only allow + invoking a specific shell. (For instance, ``/bin/bash`` may be + allowed but ``/bin/sh``, our default, may not). We invoke this + twice, once after the ``ConnectionCommand`` and once after the ``BecomeCommand``. After the ConnectionCommand, this is run by the ``UsersLoginShell``. After the ``BecomeCommand`` we specify - that the ``DEFAULT_EXECUTABLE`` is being invoked directly. - :BecomeComand DEFAULTEXECUTABLE: Is the command that performs + that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly. + :BecomeComand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs privilege escalation. Setting this up is performed by the action plugin prior to running ``exec_command``. So we just get passed :param:`cmd` which has the BecomeCommand already added. (Examples: sudo, su) If we have a BecomeCommand then we will - invoke a DEFAULT_EXECUTABLE shell inside of it so that we have - a consistent view of quoting. + invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we + have a consistent view of quoting. :Command: Is the command we're actually trying to run remotely. 
(Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file) """ From 8259c091d6fd23bf71c7224bf8ab787b013f465a Mon Sep 17 00:00:00 2001 From: Vlad Panainte <vlad.panainte@appsbroker.com> Date: Fri, 18 Mar 2016 16:25:40 +0000 Subject: [PATCH 1048/1113] fix logging --- contrib/inventory/gce.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py index b13c194a6e7..690d845a02d 100755 --- a/contrib/inventory/gce.py +++ b/contrib/inventory/gce.py @@ -90,6 +90,9 @@ import os import argparse import ConfigParser +import logging +logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) + try: import json except ImportError: From b95286c88e3e8636ecd697cc41b55dcf7fbeb026 Mon Sep 17 00:00:00 2001 From: nitzmahone <mdavis@ansible.com> Date: Fri, 18 Mar 2016 10:13:45 -0700 Subject: [PATCH 1049/1113] re-integrate test_test_infra output checking added secondary run with overridden inventory/test args per bcoca request --- test/integration/Makefile | 6 ++++-- test/integration/inventory.local | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 test/integration/inventory.local diff --git a/test/integration/Makefile b/test/integration/Makefile index b02592eaf2c..48382cd4c49 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -26,8 +26,10 @@ EUID := $(shell id -u -r) all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection test_gathering_facts test_test_infra: - # ensure fail/assert work and can stop execution with non-zero exit code - PB_OUT=$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS)) ; APB_RC=$$? ; echo "$$PB_OUT" ; echo "rc was $$APB_RC (must be non-zero)" ; [ $$APB_RC -ne 0 ] + # ensure fail/assert work locally and can stop execution with non-zero exit code + PB_OUT=$$(ansible-playbook -i inventory.local test_test_infra.yml) ; APB_RC=$$? ; echo "$$PB_OUT" ; echo "rc was $$APB_RC (must be non-zero)" ; [ $$APB_RC -ne 0 ] ; echo "ensure playbook output shows assert/fail works (True)" ; echo "$$PB_OUT" | grep "fail works (True)" || exit 1 ; echo "$$PB_OUT" | fgrep "assert works (True)" || exit 1 + # ensure we work using all specified test args, overridden inventory, etc + PB_OUT=$$(ansible-playbook -i $(INVENTORY) test_test_infra.yml -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS)) ; APB_RC=$$? 
; echo "$$PB_OUT" ; echo "rc was $$APB_RC (must be non-zero)" ; [ $$APB_RC -ne 0 ] ; echo "ensure playbook output shows assert/fail works (True)" ; echo "$$PB_OUT" | grep "fail works (True)" || exit 1 ; echo "$$PB_OUT" | fgrep "assert works (True)" || exit 1 setup: rm -rf $(TEST_DIR) diff --git a/test/integration/inventory.local b/test/integration/inventory.local new file mode 100644 index 00000000000..2baa1f88fbe --- /dev/null +++ b/test/integration/inventory.local @@ -0,0 +1,2 @@ +testhost ansible_connection=local + From 421fb6df8be10f3adaa4457e6915fded21cb76a2 Mon Sep 17 00:00:00 2001 From: karimb <karimboumedhel@gmail.com> Date: Wed, 16 Mar 2016 13:58:08 +0100 Subject: [PATCH 1050/1113] Fixes ovirt inventory to only override credentials from environment when keys exist --- contrib/inventory/ovirt.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py index 23646fa2068..dccbf421276 100755 --- a/contrib/inventory/ovirt.py +++ b/contrib/inventory/ovirt.py @@ -172,9 +172,9 @@ class OVirtInventory(object): # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. - kwargs['url'] = os.environ.get('OVIRT_URL') - kwargs['username'] = os.environ.get('OVIRT_EMAIL') - kwargs['password'] = os.environ.get('OVIRT_PASS') + kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) + kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) + kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) # Retrieve and return the ovirt driver. return API(insecure=True, **kwargs) From 13f3cbaf3b82e28de334162f04d498f6cac8ab0e Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Fri, 18 Mar 2016 15:15:59 -0400 Subject: [PATCH 1051/1113] Renaming per-item and retry callbacks --- lib/ansible/executor/process/result.py | 8 ++++---- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- lib/ansible/plugins/callback/__init__.py | 8 ++++---- lib/ansible/plugins/callback/actionable.py | 10 +++++----- lib/ansible/plugins/callback/default.py | 8 ++++---- lib/ansible/plugins/callback/skippy.py | 2 +- lib/ansible/plugins/strategy/__init__.py | 2 +- 8 files changed, 21 insertions(+), 21 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 5b90c2c4e52..aef8f801a21 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -106,15 +106,15 @@ class ResultProcess(multiprocessing.Process): # send callbacks for 'non final' results if '_ansible_retry' in result._result: - self._send_result(('v2_playbook_retry', result)) + self._send_result(('v2_runner_retry', result)) continue elif '_ansible_item_result' in result._result: if result.is_failed() or result.is_unreachable(): - self._send_result(('v2_playbook_item_on_failed', result)) + self._send_result(('v2_runner_item_on_failed', result)) elif result.is_skipped(): - self._send_result(('v2_playbook_item_on_skipped', result)) + self._send_result(('v2_runner_item_on_skipped', result)) else: - self._send_result(('v2_playbook_item_on_ok', result)) + self._send_result(('v2_runner_item_on_ok', result)) if 'diff' in result._result: self._send_result(('v2_on_file_diff', result)) continue diff --git a/lib/ansible/modules/core 
b/lib/ansible/modules/core index 22a9a15a5b0..345d9cbca86 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 22a9a15a5b08a037aa94087aba33839907881bf1 +Subproject commit 345d9cbca86a8202f3044261c84429c305bd78b8 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 33e1d9d1cb4..f47b499bb99 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 33e1d9d1cb41a4dacaf9c649d6889ae6ac0afcbf +Subproject commit f47b499bb9935b77b92f71eecfe981b9073184d6 diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 6bf71243985..61cae7b7ba5 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -341,15 +341,15 @@ class CallbackBase: def v2_playbook_on_include(self, included_file): pass #no v1 correspondance - def v2_playbook_item_on_ok(self, result): + def v2_runner_item_on_ok(self, result): pass - def v2_playbook_item_on_failed(self, result): + def v2_runner_item_on_failed(self, result): pass - def v2_playbook_item_on_skipped(self, result): + def v2_runner_item_on_skipped(self, result): pass - def v2_playbook_retry(self, result): + def v2_runner_retry(self, result): pass diff --git a/lib/ansible/plugins/callback/actionable.py b/lib/ansible/plugins/callback/actionable.py index c0a22d4357a..32ffb77a44c 100644 --- a/lib/ansible/plugins/callback/actionable.py +++ b/lib/ansible/plugins/callback/actionable.py @@ -63,14 +63,14 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_include(self, included_file): pass - def v2_playbook_item_on_ok(self, result): + def v2_runner_item_on_ok(self, result): self.display_task_banner() - self.super_ref.v2_playbook_item_on_ok(result) + self.super_ref.v2_runner_item_on_ok(result) - def v2_playbook_item_on_skipped(self, result): + def v2_runner_item_on_skipped(self, result): pass - def v2_playbook_item_on_failed(self, result): + def v2_runner_item_on_failed(self, result): self.display_task_banner() - self.super_ref.v2_playbook_item_on_failed(result) + self.super_ref.v2_runner_item_on_failed(result) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 99b1762a256..63fc6bc7438 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -159,7 +159,7 @@ class CallbackModule(CallbackBase): if diff: self._display.display(diff) - def v2_playbook_item_on_ok(self, result): + def v2_runner_item_on_ok(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': return @@ -181,7 +181,7 @@ class CallbackModule(CallbackBase): msg += " => %s" % self._dump_results(result._result) self._display.display(msg, color=color) - def v2_playbook_item_on_failed(self, result): + def v2_runner_item_on_failed(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if 'exception' in result._result: if self._display.verbosity < 3: @@ -205,7 +205,7 @@ class CallbackModule(CallbackBase): self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR) self._handle_warnings(result._result) - def v2_playbook_item_on_skipped(self, result): + def v2_runner_item_on_skipped(self, result): if C.DISPLAY_SKIPPED_HOSTS: msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) if (self._display.verbosity > 0 
or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: @@ -257,7 +257,7 @@ class CallbackModule(CallbackBase): if val: self._display.vvvv('%s: %s' % (option,val)) - def v2_playbook_retry(self, result): + def v2_runner_retry(self, result): msg = "FAILED - RETRYING: %s (%d retries left)." % (result._task, result._result['retries'] - result._result['attempts']) if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += "Result was: %s" % self._dump_results(result._result) diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 306d1a534e5..a934e0b60c4 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -35,5 +35,5 @@ class CallbackModule(CallbackModule_default): def v2_runner_on_skipped(self, result): pass - def v2_playbook_item_on_skipped(self, result): + def v2_runner_item_on_skipped(self, result): pass diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 38752114021..f8349ea7440 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -329,7 +329,7 @@ class StrategyBase: self._variable_manager.set_nonpersistent_facts(target_host, facts) else: self._variable_manager.set_host_facts(target_host, facts) - elif result[0].startswith('v2_playbook_item') or result[0] == 'v2_playbook_retry': + elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry': self._tqm.send_callback(result[0], result[1]) elif result[0] == 'v2_on_file_diff': if self._diff: From eceabec71fb15b528837955824100aed86d01a46 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 9 Mar 2016 00:40:14 -0500 Subject: [PATCH 1052/1113] page rds instances fix #14861 --- contrib/inventory/ec2.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 405868ac84e..da28234787b 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -498,9 +498,14 @@ class Ec2Inventory(object): try: conn = self.connect_to_aws(rds, region) if conn: - instances = conn.get_all_dbinstances() - for instance in instances: - self.add_rds_instance(instance, region) + marker = None + while True: + instances = conn.get_all_dbinstances(marker=marker) + marker = instances.marker + for instance in instances: + self.add_rds_instance(instance, region) + if not marker: + break except boto.exception.BotoServerError as e: error = e.reason From 8bceff513611b8aedd89a748c9cc8d21ed6eea81 Mon Sep 17 00:00:00 2001 From: RNanney <nanney.56@gmail.com> Date: Fri, 18 Mar 2016 15:55:41 -0500 Subject: [PATCH 1053/1113] Update nxos.py --- lib/ansible/module_utils/nxos.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py index e1d71ddc926..23886c742a3 100644 --- a/lib/ansible/module_utils/nxos.py +++ b/lib/ansible/module_utils/nxos.py @@ -25,6 +25,7 @@ NET_COMMON_ARGS = dict( password=dict(no_log=True), transport=dict(default='cli', choices=['cli', 'nxapi']), use_ssl=dict(default=False, type='bool'), + validate_certs=dict(default=True, type='bool'), provider=dict(type='dict') ) From 3fda5e65d42e437661c69b8fd347b423442fa6ad Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Sat, 19 Mar 2016 00:12:53 +0100 Subject: [PATCH 1054/1113] changelog: add 
cloudflare_dns --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d79c59d70f..a5f1d4265cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Ansible Changes By Release * ec2_vol_facts * ec2_vpc_dhcp_options * ec2_vpc_net_facts +- cloudflare_dns - cloudstack * cs_cluster * cs_configuration From ea1a6c56b936f9f343f649d279532fef09f2ce57 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Fri, 18 Mar 2016 11:37:12 -0700 Subject: [PATCH 1055/1113] Use docker exec -u when needed and if supported. If remote_user is given and cannot be set in docker, a warning will be displayed unless the default container user matches remote_user. --- lib/ansible/plugins/connection/docker.py | 102 +++++++++++++++-------- 1 file changed, 69 insertions(+), 33 deletions(-) diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py index 657b8b65357..864cf00aed7 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -74,15 +74,31 @@ class Connection(ConnectionBase): if not self.docker_cmd: raise AnsibleError("docker command not found in PATH") - self.can_copy_bothways = False - docker_version = self._get_docker_version() if LooseVersion(docker_version) < LooseVersion('1.3'): raise AnsibleError('docker connection type requires docker 1.3 or higher') - # Docker cp in 1.8.0 sets the owner and group to root rather than the - # user that the docker container is set to use by default. - #if LooseVersion(docker_version) >= LooseVersion('1.8.0'): - # self.can_copy_bothways = True + + # The remote user we will request from docker (if supported) + self.remote_user = None + # The actual user which will execute commands in docker (if known) + self.actual_user = None + + if self._play_context.remote_user is not None: + if LooseVersion(docker_version) >= LooseVersion('1.7'): + # Support for specifying the exec user was added in docker 1.7 + self.remote_user = self._play_context.remote_user + self.actual_user = self.remote_user + else: + self.actual_user = self._get_docker_remote_user() + + if self.actual_user != self._play_context.remote_user: + display.warning('docker {0} does not support remote_user, using container default: {1}' + .format(docker_version, self.actual_user or '?')) + elif self._display.verbosity > 2: + # Since we're not setting the actual_user, look it up so we have it for logging later + # Only do this if display verbosity is high enough that we'll need the value + # This saves overhead from calling into docker when we don't need to + self.actual_user = self._get_docker_remote_user() @staticmethod def _sanitize_version(version): @@ -107,12 +123,43 @@ class Connection(ConnectionBase): return self._sanitize_version(cmd_output) + def _get_docker_remote_user(self): + """ Get the default user configured in the docker container """ + p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + out, err = p.communicate() + + if p.returncode != 0: + display.warning('unable to retrieve default user from docker container: %s' % out + err) + return None + + # The default exec user is root, unless it was changed in the Dockerfile with USER + return out.strip() or 'root' + + def _build_exec_cmd(self, cmd): + """ Build the local docker exec command to run cmd on remote_host + + If remote_user is available and is supported by the docker + version we are using, 
it will be provided to docker exec. + """ + + local_cmd = [self.docker_cmd, 'exec'] + + if self.remote_user is not None: + local_cmd += ['-u', self.remote_user] + + # -i is needed to keep stdin open which allows pipelining to work + local_cmd += ['-i', self._play_context.remote_addr] + cmd + + return local_cmd + def _connect(self, port=None): """ Connect to the container. Nothing to do """ super(Connection, self)._connect() if not self._connected: display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format( - self._play_context.remote_user, host=self._play_context.remote_addr) + self.actual_user or '?', host=self._play_context.remote_addr) ) self._connected = True @@ -120,9 +167,7 @@ class Connection(ConnectionBase): """ Run a command on the docker host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - # -i is needed to keep stdin open which allows pipelining to work - local_cmd = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, '-i', self._play_context.remote_addr, executable, '-c', cmd] + local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd]) display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) local_cmd = [to_bytes(i, errors='strict') for i in local_cmd] @@ -156,32 +201,23 @@ class Connection(ConnectionBase): raise AnsibleFileNotFound( "file or module does not exist: %s" % in_path) - if self.can_copy_bothways: - # only docker >= 1.8.1 can do this natively - args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ] - args = [to_bytes(i, errors='strict') for i in args] - p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out_path = pipes.quote(out_path) + # Older docker doesn't have native support for copying files into + # running containers, so we use docker exec to implement this + # Although docker version 1.8 and later provide support, the + # owner and group of the files are always set to root + args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s" % (out_path, BUFSIZE)]) + args = [to_bytes(i, errors='strict') for i in args] + with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: + try: + p = subprocess.Popen(args, stdin=in_file, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except OSError: + raise AnsibleError("docker connection requires dd command in the container to put files") stdout, stderr = p.communicate() + if p.returncode != 0: raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) - else: - out_path = pipes.quote(out_path) - # Older docker doesn't have native support for copying files into - # running containers, so we use docker exec to implement this - executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' - args = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, "-i", self._play_context.remote_addr, executable, "-c", - "dd of=%s bs=%s" % (out_path, BUFSIZE)] - args = [to_bytes(i, errors='strict') for i in args] - with open(to_bytes(in_path, errors='strict'), 'rb') as in_file: - try: - p = subprocess.Popen(args, stdin=in_file, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - except OSError: - raise AnsibleError("docker connection with docker < 1.8.1 requires dd command in the chroot") - stdout, stderr = p.communicate() - - if p.returncode != 0: - raise AnsibleError("failed to transfer file %s 
to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def fetch_file(self, in_path, out_path): """ Fetch a file from container to local. """ From 24ed7b65d0519ba54db9dfec1b6ab41da6c18556 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Fri, 18 Mar 2016 20:51:23 -0700 Subject: [PATCH 1056/1113] minor changelog updates --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5f1d4265cb..09ec7a0d544 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ Ansible Changes By Release ###Major Changes: * added facility for modules to send back 'diff' for display when ansible is called with --diff, updated several modules to return this info -* added ansible-console tool, a REPL shell that allows running adhoc tasks against a chosen inventory (based on https://github.com/dominis/ansible-shell ) +* added ansible-console tool, a REPL shell that allows running adhoc tasks against a chosen inventory (based on https://github.com/dominis/ansible-shell) ####New Modules: - aws @@ -43,8 +43,9 @@ Ansible Changes By Release ###Minor Changes: * callbacks now have access to the options with which the CLI was called -* debug is now controlable with verbosity +* debug now has verbosity option to control when to display by matching number of -v in command line * modules now get verbosity, diff and other flags as passed to ansible +* mount facts now also show 'network mounts' that use the pattern `<host>:/<mount>` ## 2.0.1 "Over the Hills and Far Away" From 24c4384f0e7db79a5256d92c369d5d1195bbb2a0 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Sat, 19 Mar 2016 11:13:38 -0700 Subject: [PATCH 1057/1113] Add missing to_bytes for cmd. --- lib/ansible/plugins/connection/paramiko_ssh.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py index 150d168bf5f..88fbfbb8178 100644 --- a/lib/ansible/plugins/connection/paramiko_ssh.py +++ b/lib/ansible/plugins/connection/paramiko_ssh.py @@ -272,6 +272,8 @@ class Connection(ConnectionBase): display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr) + cmd = to_bytes(cmd, errors='strict') + no_prompt_out = '' no_prompt_err = '' become_output = '' From 49f8376051edc40577d766492050b910217edbd2 Mon Sep 17 00:00:00 2001 From: Michael Scherer <mscherer@users.noreply.github.com> Date: Sun, 20 Mar 2016 00:20:32 +0100 Subject: [PATCH 1058/1113] Fix small error (thse => these) --- docsite/rst/committer_guidelines.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/committer_guidelines.rst b/docsite/rst/committer_guidelines.rst index 9841d3b76ae..7d62d3612e6 100644 --- a/docsite/rst/committer_guidelines.rst +++ b/docsite/rst/committer_guidelines.rst @@ -12,7 +12,7 @@ If you abuse the trust and break components and builds, etc., the trust level fa Features, High Level Design, and Roadmap ======================================== -As a core team member, you are an integral part of the team that develops the roadmap. Please be engaged, and push for the features and fixes that you want to see. Also keep in mind that Red Hat, as a company, will commit to certain features, fixes, APIs, etc. for various releases. Red Hat, the company, and the Ansible team must get these committed features (etc.) completed and released as scheduled. Obligations to users, the community, and customers must come first. 
Because of thse commitments, a feature you want to develop yourself many not get into a release if it impacts a lot of other parts within Ansible. +As a core team member, you are an integral part of the team that develops the roadmap. Please be engaged, and push for the features and fixes that you want to see. Also keep in mind that Red Hat, as a company, will commit to certain features, fixes, APIs, etc. for various releases. Red Hat, the company, and the Ansible team must get these committed features (etc.) completed and released as scheduled. Obligations to users, the community, and customers must come first. Because of these commitments, a feature you want to develop yourself many not get into a release if it impacts a lot of other parts within Ansible. Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team has sole responsibility for merging new features based on proposals. From da99e4e0aa151a45457047fe4a5770aafa4add19 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Sat, 19 Mar 2016 11:42:23 -0700 Subject: [PATCH 1059/1113] Add ssh client and server to docker containers. This will allow for future integration tests using ssh to localhost from within docker containers running on Travis. --- test/utils/docker/centos6/Dockerfile | 8 ++++++++ test/utils/docker/centos7/Dockerfile | 8 ++++++++ test/utils/docker/fedora-rawhide/Dockerfile | 7 +++++++ test/utils/docker/fedora23/Dockerfile | 7 +++++++ test/utils/docker/ubuntu1204/Dockerfile | 5 +++++ test/utils/docker/ubuntu1404/Dockerfile | 5 +++++ 6 files changed, 40 insertions(+) diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile index ebca54ae65f..d0ffed83c06 100644 --- a/test/utils/docker/centos6/Dockerfile +++ b/test/utils/docker/centos6/Dockerfile @@ -13,6 +13,8 @@ RUN yum -y install \ subversion \ sudo \ unzip \ + openssh-clients \ + openssh-server \ which RUN yum -y install \ PyYAML \ @@ -33,5 +35,11 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts #VOLUME /sys/fs/cgroup /run /tmp +RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key +RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/sbin/init"] diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 6e4f93dcaf7..1114c8adb99 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -21,6 +21,8 @@ RUN yum -y install \ subversion \ sudo \ unzip \ + openssh-clients \ + openssh-server \ which RUN yum -y install \ PyYAML \ @@ -38,5 +40,11 @@ RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp +RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key +RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key +RUN ssh-keygen -q -t rsa -N 
'' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/fedora-rawhide/Dockerfile b/test/utils/docker/fedora-rawhide/Dockerfile index e587177607a..14c77dac3db 100644 --- a/test/utils/docker/fedora-rawhide/Dockerfile +++ b/test/utils/docker/fedora-rawhide/Dockerfile @@ -37,11 +37,18 @@ RUN dnf -y install \ tar \ unzip \ which \ + openssh-clients \ + openssh-server \ yum RUN localedef --quiet -c -i en_US -f UTF-8 en_US.UTF-8 RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key +RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index d382794359a..1dcb12a254a 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -37,6 +37,8 @@ RUN dnf -y install \ tar \ unzip \ which \ + openssh-clients \ + openssh-server \ yum RUN localedef --quiet -f ISO-8859-1 -i pt_BR pt_BR RUN localedef --quiet -f ISO-8859-1 -i es_MX es_MX @@ -44,5 +46,10 @@ RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key +RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index 347f9116136..f32181e5925 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -11,6 +11,8 @@ RUN apt-get install -y \ rubygems \ subversion \ sudo \ + openssh-client \ + openssh-server \ unzip # helpful things taken from the ubuntu-upstart Dockerfile: @@ -60,5 +62,8 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container docker CMD ["/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index 25493c1f86e..521e1b664dc 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -10,6 +10,8 @@ RUN apt-get install -y \ ruby \ subversion \ sudo \ + openssh-client \ + openssh-server \ unzip # helpful things taken from the 
ubuntu-upstart Dockerfile: @@ -57,5 +59,8 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa +RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys +RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container docker CMD ["/sbin/init"] From cbd93b6ad30deb4a49e7fff144848cfee0a83d1d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Mar 2016 07:58:23 -0700 Subject: [PATCH 1060/1113] Add integration test for #11821 --- .../roles/test_get_url/tasks/main.yml | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 46d9ee275d5..2038f28f815 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -150,3 +150,37 @@ get_url: url: 'http://httpbin.org/redirect/6' dest: "{{ output_dir }}/redirect.json" + +- name: Test that setting file modes work + get_url: + url: 'http://www.ansible.com/' + dest: '{{ output_dir }}/test' + mode: '0707' + register: result + +- stat: + path: "{{ output_dir }}/test" + register: stat_result + +- name: Assert that the file has the right permissions + assert: + that: + - "result.changed == true" + - "stat_result.stat.mode == '0707'" + +- name: Test that setting file modes on an already downlaoded file work + get_url: + url: 'http://www.ansible.com/' + dest: '{{ output_dir }}/test' + mode: '0070' + register: result + +- stat: + path: "{{ output_dir }}/test" + register: stat_result + +- name: Assert that the file has the right permissions + assert: + that: + - "result.changed == true" + - "stat_result.stat.mode == '0070'" From 4a00999875e1a5ef2d753ff6bc3be29b61200944 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Mar 2016 08:03:58 -0700 Subject: [PATCH 1061/1113] Update to latest git submodules --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 345d9cbca86..d71b9bae893 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 345d9cbca86a8202f3044261c84429c305bd78b8 +Subproject commit d71b9bae893e5cb860eb7717af93d00363b3890c diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f47b499bb99..d5030ae5550 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f47b499bb9935b77b92f71eecfe981b9073184d6 +Subproject commit d5030ae55501a5da4b79db5ee102311fcdd4225c From 9d2fe2fb2c6117cb31c4751adacdd2f2ca3556a2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Sun, 20 Mar 2016 10:38:18 -0400 Subject: [PATCH 1062/1113] Fixing role param precedence issues * Make role param resolution follow the role dependency chain, rather than using all roles within the play * Also move params to be merged in after role vars in general, to match our variable precedence rules * Changes to the way var lookup is done in role helper methods for get_vars(), get_inherited_vars(), and get_role_params() to make the above possible without trampling on previously merged vars Fixes #14472 Fixes #14475 --- 
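A rough sketch of the merge order this change is aiming for (not part of the commit; effective_role_vars is an illustrative name only, the real logic lives in Role.get_vars() and Role.get_role_params() in the diff below, and combine_vars(a, b) is assumed to return a copy of a updated with b, so whatever is merged later wins):

    from ansible.utils.vars import combine_vars

    def effective_role_vars(role, dep_chain):
        merged = {}
        # vars inherited from parent roles in the dependency chain come first
        for parent in dep_chain:
            merged = combine_vars(merged, parent._role_vars)
        # then vars from this role's own dependencies
        for dep in role.get_all_dependencies():
            merged = combine_vars(merged, dep.get_vars(include_params=False))
        # then the role's own vars
        merged = combine_vars(merged, role._role_vars)
        # role params go in last, walking the same dependency chain first,
        # so params always win over role vars of the same name
        for parent in dep_chain:
            merged = combine_vars(merged, parent._role_params)
        return combine_vars(merged, role._role_params)
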
lib/ansible/playbook/role/__init__.py | 16 ++++++++-------- lib/ansible/vars/__init__.py | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 9b406ae7ba4..133dd50acca 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -259,31 +259,31 @@ class Role(Base, Become, Conditional, Taggable): default_vars = combine_vars(default_vars, self._default_vars) return default_vars - def get_inherited_vars(self, dep_chain=[], include_params=True): + def get_inherited_vars(self, dep_chain=[]): inherited_vars = dict() if dep_chain: for parent in dep_chain: inherited_vars = combine_vars(inherited_vars, parent._role_vars) - if include_params: - inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars - def get_role_params(self): + def get_role_params(self, dep_chain=[]): params = {} - for dep in self.get_all_dependencies(): - params = combine_vars(params, dep._role_params) + if dep_chain: + for parent in dep_chain: + params = combine_vars(params, parent._role_params) + params = combine_vars(params, self._role_params) return params def get_vars(self, dep_chain=[], include_params=True): - all_vars = self.get_inherited_vars(dep_chain, include_params=include_params) + all_vars = self.get_inherited_vars(dep_chain) for dep in self.get_all_dependencies(): all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params)) all_vars = combine_vars(all_vars, self._role_vars) if include_params: - all_vars = combine_vars(all_vars, self._role_params) + all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain)) return all_vars diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index e881c47a5b4..2d1a872f294 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -308,12 +308,12 @@ class VariableManager: if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): - all_vars = combine_vars(all_vars, role.get_role_params()) all_vars = combine_vars(all_vars, role.get_vars(include_params=False)) if task: if task._role: all_vars = combine_vars(all_vars, task._role.get_vars()) + all_vars = combine_vars(all_vars, task._role.get_role_params(task._block._dep_chain)) all_vars = combine_vars(all_vars, task.get_vars()) if host: From 6824f3a7ccfd5be432b3a8dc68626815951d5b4c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Mar 2016 08:49:40 -0700 Subject: [PATCH 1063/1113] Change url so that we don't test https in the tests for file perms --- test/integration/roles/test_get_url/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 2038f28f815..61b26f08f61 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -153,7 +153,7 @@ - name: Test that setting file modes work get_url: - url: 'http://www.ansible.com/' + url: 'http://httpbin.org/' dest: '{{ output_dir }}/test' mode: '0707' register: result @@ -170,7 +170,7 @@ - name: Test that setting file modes on an already downlaoded file work get_url: - url: 'http://www.ansible.com/' + url: 'http://httpbin.org/' dest: '{{ output_dir }}/test' mode: '0070' register: result From 4224c11b5fcc8e8e641b2b744a5020aa3a511e13 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Sun, 20 Mar 
2016 10:48:11 -0700 Subject: [PATCH 1064/1113] Combine new RUN commands into one RUN command. --- test/utils/docker/centos6/Dockerfile | 12 ++++++------ test/utils/docker/centos7/Dockerfile | 12 ++++++------ test/utils/docker/fedora-rawhide/Dockerfile | 10 +++++----- test/utils/docker/fedora23/Dockerfile | 10 +++++----- test/utils/docker/ubuntu1204/Dockerfile | 6 +++--- test/utils/docker/ubuntu1404/Dockerfile | 6 +++--- 6 files changed, 28 insertions(+), 28 deletions(-) diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile index d0ffed83c06..dd53b6e2efc 100644 --- a/test/utils/docker/centos6/Dockerfile +++ b/test/utils/docker/centos6/Dockerfile @@ -35,11 +35,11 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts #VOLUME /sys/fs/cgroup /run /tmp -RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key -RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key -RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key && \ + ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ + ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ + ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/sbin/init"] diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index 1114c8adb99..a23707ef582 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -40,11 +40,11 @@ RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp -RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key -RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key -RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN ssh-keygen -q -t rsa1 -N '' -f /etc/ssh/ssh_host_key && \ + ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ + ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ + ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/fedora-rawhide/Dockerfile b/test/utils/docker/fedora-rawhide/Dockerfile index 14c77dac3db..7587b93ab46 100644 --- a/test/utils/docker/fedora-rawhide/Dockerfile +++ b/test/utils/docker/fedora-rawhide/Dockerfile @@ -45,10 +45,10 @@ RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup 
/run /tmp -RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key -RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ + ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ + ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index 1dcb12a254a..a0563743878 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -46,10 +46,10 @@ RUN /usr/bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /usr/bin/echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts VOLUME /sys/fs/cgroup /run /tmp -RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key -RUN ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN ssh-keygen -q -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ + ssh-keygen -q -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ + ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container=docker CMD ["/usr/sbin/init"] diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index f32181e5925..6924bba7032 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -62,8 +62,8 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container docker CMD ["/sbin/init"] diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index 521e1b664dc..51d24c5e03c 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -59,8 +59,8 @@ RUN /bin/sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers RUN mkdir /etc/ansible/ RUN /bin/echo -e "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts RUN locale-gen en_US.UTF-8 -RUN ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa -RUN cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys -RUN for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done +RUN 
ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa && \ + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys && \ + for key in /etc/ssh/ssh_host_*_key.pub; do echo "localhost $(cat ${key})" >> /root/.ssh/known_hosts; done ENV container docker CMD ["/sbin/init"] From 38092dcc27d0ad42e59d604713526f453232c4d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=8F=E6=81=BA=28Xia=20Kai=29?= <xiaket@gmail.com> Date: Mon, 21 Mar 2016 07:21:54 +0000 Subject: [PATCH 1065/1113] import the base class and check whether this obj has the required base class. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 夏恺(Xia Kai) <xiaket@gmail.com> --- lib/ansible/plugins/__init__.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index afeab7a1038..078ab459366 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -329,8 +329,13 @@ class PluginLoader: obj = getattr(self._module_cache[path], self.class_name) else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) - if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__mro__]: - return None + if self.base_class: + # The import path is hardcoded and should be the right place, + # so we are not expecting an ImportError. + module = __import__(self.package, fromlist=[self.base_class]) + # Check whether this obj has the required base class. + if not issubclass(obj.__class__, getattr(module, self.base_class, None)): + return None return obj From c7e1a65da4ff06ead2202dcb3eea30787566ca86 Mon Sep 17 00:00:00 2001 From: Gerben Geijteman <gerben.geijteman@fdmediagroep.nl> Date: Wed, 2 Mar 2016 13:12:30 +0100 Subject: [PATCH 1066/1113] Proposed change to documentation to elaborate on new notation style for with_items / with_subelements --- docsite/rst/playbooks_loops.rst | 4 ++-- samples/with_subelements.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 5d88da5af8d..e329d7650de 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -204,7 +204,7 @@ It might happen like so:: - authorized_key: "user={{ item.0.name }} key='{{ lookup('file', item.1) }}'" with_subelements: - - users + - "{{ users }}" - authorized Given the mysql hosts and privs subkey lists, you can also iterate over a list in a nested subkey:: @@ -212,7 +212,7 @@ Given the mysql hosts and privs subkey lists, you can also iterate over a list i - name: Setup MySQL users mysql_user: name={{ item.0.name }} password={{ item.0.mysql.password }} host={{ item.1 }} priv={{ item.0.mysql.privs | join('/') }} with_subelements: - - users + - "{{ users }}" - mysql.hosts Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those diff --git a/samples/with_subelements.yml b/samples/with_subelements.yml index 95d0dda67c6..0037f4a6311 100644 --- a/samples/with_subelements.yml +++ b/samples/with_subelements.yml @@ -14,5 +14,5 @@ tasks: - debug: msg="user={{ item.0.name }} key='{{ item.1 }}'" with_subelements: - - users + - "{{ users }}" - authorized From 4938b98e4eebe13ea573d59af85d776b9a26c561 Mon Sep 17 00:00:00 2001 From: Tobias Wolf <towolf@gmail.com> Date: Mon, 21 Mar 2016 14:59:57 +0100 Subject: [PATCH 1067/1113] For `actionable` cb plugin also hide ok for itemized results The purpose of the `actionable` 
callback plugin is to hide uninteresting results. Also hide the ok results when the task was itemized. --- lib/ansible/plugins/callback/actionable.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/actionable.py b/lib/ansible/plugins/callback/actionable.py index 32ffb77a44c..15bd8ca8353 100644 --- a/lib/ansible/plugins/callback/actionable.py +++ b/lib/ansible/plugins/callback/actionable.py @@ -50,8 +50,6 @@ class CallbackModule(CallbackModule_default): if result._result.get('changed', False): self.display_task_banner() self.super_ref.v2_runner_on_ok(result) - else: - pass def v2_runner_on_unreachable(self, result): self.display_task_banner() @@ -64,8 +62,9 @@ class CallbackModule(CallbackModule_default): pass def v2_runner_item_on_ok(self, result): - self.display_task_banner() - self.super_ref.v2_runner_item_on_ok(result) + if result._result.get('changed', False): + self.display_task_banner() + self.super_ref.v2_runner_item_on_ok(result) def v2_runner_item_on_skipped(self, result): pass From 3b8d7531218c0560a39b83e31d34f2cf41997931 Mon Sep 17 00:00:00 2001 From: Owen Tuz <owen.tuz@telegraph.co.uk> Date: Mon, 21 Mar 2016 14:58:55 +0000 Subject: [PATCH 1068/1113] Avoid cache conflicts when using multiple AWS accounts --- contrib/inventory/ec2.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 02fdfdf8907..f0214b61f06 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -331,8 +331,14 @@ class Ec2Inventory(object): if not os.path.exists(cache_dir): os.makedirs(cache_dir) - self.cache_path_cache = cache_dir + "/ansible-ec2.cache" - self.cache_path_index = cache_dir + "/ansible-ec2.index" + cache_name = 'ansible-ec2' + aws_profile = lambda: (self.boto_profile or + os.environ.get('AWS_PROFILE') or + os.environ.get('AWS_ACCESS_KEY_ID')) + if aws_profile(): + cache_name = '%s-%s' % (cache_name, aws_profile()) + self.cache_path_cache = cache_dir + "/%s.cache" % cache_name + self.cache_path_index = cache_dir + "/%s.index" % cache_name self.cache_max_age = config.getint('ec2', 'cache_max_age') if config.has_option('ec2', 'expand_csv_tags'): From ae21d98955f0d7367ea94610c14e66b78c7ad4ca Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Mar 2016 11:00:07 -0400 Subject: [PATCH 1069/1113] Properly use check_raw when using parse_kv in cli/ code Fixes ansible-modules-core#3070 --- lib/ansible/cli/adhoc.py | 3 ++- lib/ansible/cli/console.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index dcb65ad135f..52ab761d523 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -81,11 +81,12 @@ class AdHocCLI(CLI): return True def _play_ds(self, pattern, async, poll): + check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw') return dict( name = "Ansible Ad-Hoc", hosts = pattern, gather_facts = 'no', - tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args)), async=async, poll=poll) ] + tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args, check_raw=check_raw)), async=async, poll=poll) ] ) def run(self): diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py index 38ec0379341..c6b27f8f663 100644 --- a/lib/ansible/cli/console.py +++ b/lib/ansible/cli/console.py @@ -183,12 +183,12 @@ class ConsoleCLI(CLI, cmd.Cmd): 
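+        # Note (annotation, not in the upstream hunk): as in adhoc.py above,
+        # 'command', 'shell', 'script' and 'raw' take a free-form command string,
+        # so parse_kv() below is asked via check_raw to keep key=value-looking
+        # tokens as part of that raw string instead of parsing them as module options.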
result = None try: + check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw') play_ds = dict( name = "Ansible Shell", hosts = self.options.cwd, gather_facts = 'no', - #tasks = [ dict(action=dict(module=module, args=parse_kv(module_args)), async=self.options.async, poll=self.options.poll_interval) ] - tasks = [ dict(action=dict(module=module, args=parse_kv(module_args)))] + tasks = [ dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))] ) play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader) except Exception as e: From 7ce130212fe34b1fa5c0753f0ca25869081f83fb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 21 Mar 2016 08:39:57 -0700 Subject: [PATCH 1070/1113] Cleanup some minor issues in PluginLoader: * class_only was a keyword arg of get() and all() that was mistakenly passed on to Plugins. Be sure to strip it from the keyword args before instantiating Plugins. (Reworked API probably should either not instantiate Plugins or take the args for the Plugin as a separate list and a dict.) * Checking required base_classes was only done in get() and only if class_only was False (ie: that Plugin was instantiated). This meant that different plugins could be found depending on whether the call was to .get() or to all() and whether it was for classes or instances. Fixed so that required base_classes are always checked. --- lib/ansible/plugins/__init__.py | 48 +++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 83956753dc2..9ac54a8d062 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -316,6 +316,7 @@ class PluginLoader: def get(self, name, *args, **kwargs): ''' instantiates a plugin of the given name using arguments ''' + class_only = kwargs.pop('class_only', False) if name in self.aliases: name = self.aliases[name] path = self.find_plugin(name) @@ -325,23 +326,28 @@ class PluginLoader: if path not in self._module_cache: self._module_cache[path] = self._load_module_source('.'.join([self.package, name]), path) - if kwargs.get('class_only', False): - obj = getattr(self._module_cache[path], self.class_name) - else: - obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) - if self.base_class: - # The import path is hardcoded and should be the right place, - # so we are not expecting an ImportError. - module = __import__(self.package, fromlist=[self.base_class]) - # Check whether this obj has the required base class. - if not issubclass(obj.__class__, getattr(module, self.base_class, None)): - return None + obj = getattr(self._module_cache[path], self.class_name) + if self.base_class: + # The import path is hardcoded and should be the right place, + # so we are not expecting an ImportError. + module = __import__(self.package, fromlist=[self.base_class]) + # Check whether this obj has the required base class. 
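+            # Unlike the old check, which only looked for a matching class *name*
+            # in the MRO, the getattr()/issubclass() pair below compares against the
+            # real base class object; a missing attribute means the plugin is rejected.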
+ try: + plugin_class = getattr(module, self.base_class) + except AttributeError: + return None + if not issubclass(obj, plugin_class): + return None + + if not class_only: + obj = obj(*args, **kwargs) return obj def all(self, *args, **kwargs): ''' instantiates all plugins with the same arguments ''' + class_only = kwargs.pop('class_only', False) for i in self._get_paths(): matches = glob.glob(os.path.join(i, "*.py")) matches.sort() @@ -353,13 +359,21 @@ class PluginLoader: if path not in self._module_cache: self._module_cache[path] = self._load_module_source(name, path) - if kwargs.get('class_only', False): - obj = getattr(self._module_cache[path], self.class_name) - else: - obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) - - if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: + obj = getattr(self._module_cache[path], self.class_name) + if self.base_class: + # The import path is hardcoded and should be the right place, + # so we are not expecting an ImportError. + module = __import__(self.package, fromlist=[self.base_class]) + # Check whether this obj has the required base class. + try: + plugin_class = getattr(module, self.base_class) + except AttributeError: continue + if not issubclass(obj, plugin_class): + continue + + if not class_only: + obj = obj(*args, **kwargs) # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) From ed9e164b8052a11eb56987935594f54a61878c0a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Sun, 20 Mar 2016 10:33:51 -0700 Subject: [PATCH 1071/1113] Fix ansible_sudo_pass inventory variable so that it overrides setting of sudo password from the command line --- lib/ansible/playbook/play_context.py | 28 ++++- test/units/playbook/test_play_context.py | 135 +++++++++++++++++++++++ 2 files changed, 157 insertions(+), 6 deletions(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index dce14c9b7fb..98bc8ba3ede 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -361,7 +361,6 @@ class PlayContext(Base): if exe_var in variables: setattr(new_info, 'executable', variables.get(exe_var)) - attrs_considered = [] for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING): for variable_name in variable_names: @@ -377,17 +376,34 @@ class PlayContext(Base): attrs_considered.append(attr) # no else, as no other vars should be considered - # make sure we get port defaults if needed - if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None: - new_info.port = int(C.DEFAULT_REMOTE_PORT) - - # become legacy updates + # become legacy updates -- from commandline if not new_info.become_pass: if new_info.become_method == 'sudo' and new_info.sudo_pass: setattr(new_info, 'become_pass', new_info.sudo_pass) elif new_info.become_method == 'su' and new_info.su_pass: setattr(new_info, 'become_pass', new_info.su_pass) + # become legacy updates -- from inventory file (inventory overrides + # commandline) + for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'): + if become_pass_name in variables: + break + else: # This is a for-else + if new_info.become_method == 'sudo': + for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'): + if sudo_pass_name in variables: + setattr(new_info, 'become_pass', variables[sudo_pass_name]) + break + if new_info.become_method == 'sudo': + for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'): + if su_pass_name 
in variables: + setattr(new_info, 'become_pass', variables[su_pass_name]) + break + + # make sure we get port defaults if needed + if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None: + new_info.port = int(C.DEFAULT_REMOTE_PORT) + # special overrides for the connection setting if len(delegated_vars) > 0: # in the event that we were using local before make sure to reset the diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py index 2e2c2238cc2..c18f9e637e7 100644 --- a/test/units/playbook/test_play_context.py +++ b/test/units/playbook/test_play_context.py @@ -166,3 +166,138 @@ class TestPlayContext(unittest.TestCase): play_context.become_method = 'bad' self.assertRaises(AnsibleError, play_context.make_become_cmd, cmd=default_cmd, executable="/bin/bash") +class TestTaskAndVariableOverrride(unittest.TestCase): + + inventory_vars = ( + ('preferred_names', + dict(ansible_connection='local', + ansible_user='ansibull', + ansible_become_user='ansibull', + ansible_become_method='su', + ansible_become_pass='ansibullwuzhere',), + dict(connection='local', + remote_user='ansibull', + become_user='ansibull', + become_method='su', + become_pass='ansibullwuzhere',) + ), + ('alternate_names', + dict(ansible_become_password='ansibullwuzhere',), + dict(become_pass='ansibullwuzhere',) + ), + ('deprecated_names', + dict(ansible_ssh_user='ansibull', + ansible_sudo_user='ansibull', + ansible_sudo_pass='ansibullwuzhere',), + dict(remote_user='ansibull', + become_method='sudo', + become_user='ansibull', + become_pass='ansibullwuzhere',) + ), + ('deprecated_names2', + dict(ansible_ssh_user='ansibull', + ansible_su_user='ansibull', + ansible_su_pass='ansibullwuzhere',), + dict(remote_user='ansibull', + become_method='su', + become_user='ansibull', + become_pass='ansibullwuzhere',) + ), + ('deprecated_alt_names', + dict(ansible_sudo_password='ansibullwuzhere',), + dict(become_method='sudo', + become_pass='ansibullwuzhere',) + ), + ('deprecated_alt_names2', + dict(ansible_su_password='ansibullwuzhere',), + dict(become_method='su', + become_pass='ansibullwuzhere',) + ), + ('deprecated_and_preferred_names', + dict(ansible_user='ansibull', + ansible_ssh_user='badbull', + ansible_become_user='ansibull', + ansible_sudo_user='badbull', + ansible_become_method='su', + ansible_become_pass='ansibullwuzhere', + ansible_sudo_pass='badbull', + ), + dict(connection='local', + remote_user='ansibull', + become_user='ansibull', + become_method='su', + become_pass='ansibullwuzhere',) + ), + ) + + def setUp(self): + parser = CLI.base_parser( + runas_opts = True, + meta_opts = True, + runtask_opts = True, + vault_opts = True, + async_opts = True, + connect_opts = True, + subset_opts = True, + check_opts = True, + inventory_opts = True, + ) + + (options, args) = parser.parse_args(['-vv', '--check']) + + mock_play = MagicMock() + mock_play.connection = 'mock' + mock_play.remote_user = 'mock' + mock_play.port = 1234 + mock_play.become = True + mock_play.become_method = 'mock' + mock_play.become_user = 'mockroot' + mock_play.no_log = True + + self.play_context = PlayContext(play=mock_play, options=options) + + mock_task = MagicMock() + mock_task.connection = mock_play.connection + mock_task.remote_user = mock_play.remote_user + mock_task.no_log = mock_play.no_log + mock_task.become = mock_play.become + mock_task.become_method = mock_play.becom_method + mock_task.become_user = mock_play.become_user + mock_task.become_pass = 'mocktaskpass' + mock_task._local_action = False + 
mock_task.delegate_to = None + + self.mock_task = mock_task + + self.mock_templar = MagicMock() + + def tearDown(self): + pass + + def _check_vars_overridden(self): + self.assertEqual(play_context.connection, 'mock_inventory') + self.assertEqual(play_context.remote_user, 'mocktask') + self.assertEqual(play_context.port, 4321) + self.assertEqual(play_context.no_log, True) + self.assertEqual(play_context.become, True) + self.assertEqual(play_context.become_method, "mocktask") + self.assertEqual(play_context.become_user, "mocktaskroot") + self.assertEqual(play_context.become_pass, "mocktaskpass") + + mock_task.no_log = False + play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar) + self.assertEqual(play_context.no_log, False) + + def test_override_magic_variables(self): + play_context = play_context.set_task_and_variable_override(task=self.mock_task, variables=all_vars, templar=self.mock_templar) + + mock_play.connection = 'mock' + mock_play.remote_user = 'mock' + mock_play.port = 1234 + mock_play.become_method = 'mock' + mock_play.become_user = 'mockroot' + mock_task.become_pass = 'mocktaskpass' + # Inventory vars override things set from cli vars (--become, -user, + # etc... [notably, not --extravars]) + for test_name, all_vars, expected in self.inventory_vars: + yield self._check_vars_overriden, test_name, all_vars, expected From fea5500605e7785f1d048e0b1b7ac0e92cbb77cc Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Tue, 22 Mar 2016 07:50:04 +0100 Subject: [PATCH 1072/1113] remove double read of "path" path is already read in line 444, don't reread the file here, but use the existing content --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 203a72996d1..c65d4a466df 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -466,7 +466,7 @@ class Facts(object): elif path == '/etc/SuSE-release': if 'open' in data.lower(): data = data.splitlines() - distdata = get_file_content(path).split('\n')[0] + distdata = data[0] self.facts['distribution'] = distdata.split()[0] for line in data: release = re.search('CODENAME *= *([^\n]+)', line) From 2c28dcc5cbf076c37b69c0e102c9ee449acbeacb Mon Sep 17 00:00:00 2001 From: Robin Roth <robin-roth@online.de> Date: Tue, 22 Mar 2016 08:12:17 +0100 Subject: [PATCH 1073/1113] break after parsing SuSE-release in facts.py All sections that lead to succesful parsing of the version break afterwards, for SuSE-release this break was missing, potentiall causing #14837 --- lib/ansible/module_utils/facts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 203a72996d1..816dfb47382 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -484,6 +484,7 @@ class Facts(object): if release: self.facts['distribution_release'] = release.group(1) self.facts['distribution_version'] = self.facts['distribution_version'] + '.' 
+ release.group(1) + break elif name == 'Debian': data = get_file_content(path) if 'Debian' in data or 'Raspbian' in data: From 395ff361d45805f7d7a16ae8cd06b382464d60b7 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 22 Mar 2016 07:53:50 -0400 Subject: [PATCH 1074/1113] Adding acl package to all docker images --- test/utils/docker/centos6/Dockerfile | 1 + test/utils/docker/centos7/Dockerfile | 1 + test/utils/docker/fedora-rawhide/Dockerfile | 1 + test/utils/docker/fedora23/Dockerfile | 1 + test/utils/docker/ubuntu1204/Dockerfile | 1 + test/utils/docker/ubuntu1404/Dockerfile | 1 + 6 files changed, 6 insertions(+) diff --git a/test/utils/docker/centos6/Dockerfile b/test/utils/docker/centos6/Dockerfile index dd53b6e2efc..127697d6ee3 100644 --- a/test/utils/docker/centos6/Dockerfile +++ b/test/utils/docker/centos6/Dockerfile @@ -2,6 +2,7 @@ FROM centos:centos6 RUN yum -y update; yum clean all; RUN yum -y install \ + acl \ epel-release \ file \ gcc \ diff --git a/test/utils/docker/centos7/Dockerfile b/test/utils/docker/centos7/Dockerfile index a23707ef582..a3133178a7f 100644 --- a/test/utils/docker/centos7/Dockerfile +++ b/test/utils/docker/centos7/Dockerfile @@ -10,6 +10,7 @@ rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ rm -f /lib/systemd/system/basic.target.wants/*; \ rm -f /lib/systemd/system/anaconda.target.wants/*; RUN yum -y install \ + acl \ dbus-python \ epel-release \ file \ diff --git a/test/utils/docker/fedora-rawhide/Dockerfile b/test/utils/docker/fedora-rawhide/Dockerfile index 7587b93ab46..55f0854032b 100644 --- a/test/utils/docker/fedora-rawhide/Dockerfile +++ b/test/utils/docker/fedora-rawhide/Dockerfile @@ -10,6 +10,7 @@ rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ rm -f /lib/systemd/system/basic.target.wants/*; \ rm -f /lib/systemd/system/anaconda.target.wants/*; RUN dnf -y install \ + acl \ dbus-python \ file \ findutils \ diff --git a/test/utils/docker/fedora23/Dockerfile b/test/utils/docker/fedora23/Dockerfile index a0563743878..728a934222c 100644 --- a/test/utils/docker/fedora23/Dockerfile +++ b/test/utils/docker/fedora23/Dockerfile @@ -10,6 +10,7 @@ rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ rm -f /lib/systemd/system/basic.target.wants/*; \ rm -f /lib/systemd/system/anaconda.target.wants/*; RUN dnf -y install \ + acl \ dbus-python \ file \ findutils \ diff --git a/test/utils/docker/ubuntu1204/Dockerfile b/test/utils/docker/ubuntu1204/Dockerfile index 6924bba7032..d7cbac6c9e7 100644 --- a/test/utils/docker/ubuntu1204/Dockerfile +++ b/test/utils/docker/ubuntu1204/Dockerfile @@ -1,6 +1,7 @@ FROM ubuntu:precise RUN apt-get clean; apt-get update -y; RUN apt-get install -y \ + acl \ debianutils \ gawk \ git \ diff --git a/test/utils/docker/ubuntu1404/Dockerfile b/test/utils/docker/ubuntu1404/Dockerfile index 51d24c5e03c..fe8a26eefd1 100644 --- a/test/utils/docker/ubuntu1404/Dockerfile +++ b/test/utils/docker/ubuntu1404/Dockerfile @@ -1,6 +1,7 @@ FROM ubuntu:trusty RUN apt-get clean; apt-get update -y; RUN apt-get install -y \ + acl \ debianutils \ gawk \ git \ From 081c33c451e2ff50e18c7a852ab273e06ef823dd Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 22 Mar 2016 09:49:19 -0400 Subject: [PATCH 1075/1113] Adding a docker pull to the run_tests.sh script --- test/utils/run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index eef7f1f60ed..03d8f653ef0 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ 
-10,6 +10,7 @@ if [ "${TARGET}" = "sanity" ]; then else set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" + docker pull ansible/ansible:${TARGET} docker run -d --volume="${PWD}:/root/ansible:Z" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) From a3d0c9e0068b07d3e098d2b25bfb2a48437ac82e Mon Sep 17 00:00:00 2001 From: Jason McKerr <mckerrj@gmail.com> Date: Tue, 22 Mar 2016 10:42:26 -0400 Subject: [PATCH 1076/1113] Update committer_guidelines.rst --- docsite/rst/committer_guidelines.rst | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/docsite/rst/committer_guidelines.rst b/docsite/rst/committer_guidelines.rst index 7d62d3612e6..3bb95a113b7 100644 --- a/docsite/rst/committer_guidelines.rst +++ b/docsite/rst/committer_guidelines.rst @@ -60,13 +60,23 @@ People ====== Individuals who've been asked to become a part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, via a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act. -* James Cammarata (RedHat/Ansible) -* Brian Coca (RedHat/Ansible) -* Matt Davis (RedHat/Ansible) -* Toshio Kuratomi (RedHat/Ansible) -* Jason McKerr (RedHat/Ansible) -* Robyn Bergeron (RedHat/Ansible) -* Greg DeKoenigsberg (RedHat/Ansible +* James Cammarata +* Brian Coca +* Matt Davis +* Toshio Kuratomi +* Jason McKerr +* Robyn Bergeron +* Greg DeKoenigsberg * Monty Taylor * Matt Martz +* Nate Case +* James Tanner +* Peter Sprygata +* Abhijit Menon-Sen +* Michael Scherer +* René Moser +* David Shrewsbury +* Sandra Wills +* Graham Mainwaring +* Jon Davila From 18599047cd4a46260b05215e7f3c561c335fc1c7 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Mon, 21 Mar 2016 09:46:20 -0500 Subject: [PATCH 1077/1113] Use docker cp instead of docker volumes to allow for testing with remote docker servers --- test/utils/run_tests.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index 03d8f653ef0..f8d76c725c6 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -11,7 +11,8 @@ else set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" docker pull ansible/ansible:${TARGET} - docker run -d --volume="${PWD}:/root/ansible:Z" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} + docker run -d --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} + docker cp ${PWD} $(cat /tmp/cid_${TARGET}):/root/ansible docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . 
hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) From 9a2c1cf94d9a8a640fe02f3ecefb9e9c9a941d99 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 22 Mar 2016 11:45:26 -0400 Subject: [PATCH 1078/1113] Force removal of docker container after test ends --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index f8d76c725c6..b174c514c7e 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -17,6 +17,6 @@ else docker kill $(cat /tmp/cid_${TARGET}) if [ "X${TESTS_KEEP_CONTAINER}" = "X" ]; then - docker rm "${C_NAME}" + docker rm -f "${C_NAME}" fi fi From 06d77996da1121d5628ab85e9f44daaf49120d01 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Mar 2016 09:21:49 -0700 Subject: [PATCH 1079/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index d71b9bae893..f15000d7d43 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit d71b9bae893e5cb860eb7717af93d00363b3890c +Subproject commit f15000d7d435ccc85e7dce9f8f92d32026be9ed6 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index d5030ae5550..aa80e700e5d 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit d5030ae55501a5da4b79db5ee102311fcdd4225c +Subproject commit aa80e700e5da8e7a65347cf4afbedfb75d50ae84 From 941e31e9683dcef3c5c732e1b50b221a0ef30815 Mon Sep 17 00:00:00 2001 From: David Blencowe <dblencowe@gmail.com> Date: Tue, 22 Mar 2016 16:49:03 +0000 Subject: [PATCH 1080/1113] Update the Mac warning in intro_installation.rst A setting of 1024 here causes other issues such as VMware not being able to start Virtual Machines. Changing to just "unlimited" as the option resolves this --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 03e6bd7dfdd..0d2182b7c26 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -52,7 +52,7 @@ This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on. .. note:: As of 2.0 ansible uses a few more file handles to manage its forks, OS X has a very low setting so if you want to use 15 or more forks - you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 unlimited``. Or just any time you see a "Too many open files" error. + you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles unlimited``. Or just any time you see a "Too many open files" error. .. 
warning:: From 23a74eb12537f0747e16a9675874268361c16cc9 Mon Sep 17 00:00:00 2001 From: Joel <joel@deport.me> Date: Tue, 22 Mar 2016 12:12:51 -0500 Subject: [PATCH 1081/1113] Sort plugins by basename to support ordering callbacks --- lib/ansible/plugins/__init__.py | 52 +++++++++++++++++---------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 9ac54a8d062..e190b219fb7 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -348,36 +348,38 @@ class PluginLoader: ''' instantiates all plugins with the same arguments ''' class_only = kwargs.pop('class_only', False) + all_matches = [] + for i in self._get_paths(): - matches = glob.glob(os.path.join(i, "*.py")) - matches.sort() - for path in matches: - name, _ = os.path.splitext(path) - if '__init__' in name: - continue + all_matches.extend(glob.glob(os.path.join(i, "*.py"))) - if path not in self._module_cache: - self._module_cache[path] = self._load_module_source(name, path) + for path in sorted(all_matches, key=lambda match: os.path.basename(match)): + name, _ = os.path.splitext(path) + if '__init__' in name: + continue - obj = getattr(self._module_cache[path], self.class_name) - if self.base_class: - # The import path is hardcoded and should be the right place, - # so we are not expecting an ImportError. - module = __import__(self.package, fromlist=[self.base_class]) - # Check whether this obj has the required base class. - try: - plugin_class = getattr(module, self.base_class) - except AttributeError: - continue - if not issubclass(obj, plugin_class): - continue + if path not in self._module_cache: + self._module_cache[path] = self._load_module_source(name, path) - if not class_only: - obj = obj(*args, **kwargs) + obj = getattr(self._module_cache[path], self.class_name) + if self.base_class: + # The import path is hardcoded and should be the right place, + # so we are not expecting an ImportError. + module = __import__(self.package, fromlist=[self.base_class]) + # Check whether this obj has the required base class. + try: + plugin_class = getattr(module, self.base_class) + except AttributeError: + continue + if not issubclass(obj, plugin_class): + continue - # set extra info on the module, in case we want it later - setattr(obj, '_original_path', path) - yield obj + if not class_only: + obj = obj(*args, **kwargs) + + # set extra info on the module, in case we want it later + setattr(obj, '_original_path', path) + yield obj action_loader = PluginLoader( 'ActionModule', From 3a8f3ece531bf98b8d91658a024949035bcb3294 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Mar 2016 10:24:00 -0700 Subject: [PATCH 1082/1113] Add changelog entry for sorting of plugins --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09ec7a0d544..f287ca30164 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,9 @@ Ansible Changes By Release * debug now has verbosity option to control when to display by matching number of -v in command line * modules now get verbosity, diff and other flags as passed to ansible * mount facts now also show 'network mounts' that use the pattern `<host>:/<mount>` +* Plugins are now sorted before loading. This means, for instance, if you want + two custom callback plugins to run in a certain order you can name them + 10-first-callback.py and 20-second-callback.py. 
## 2.0.1 "Over the Hills and Far Away" From 90ffb8d8f0079a0f1d3d65b8fa7bb97d0245df81 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 22 Mar 2016 13:53:41 -0400 Subject: [PATCH 1083/1113] Also remove volumes when removing containers --- test/utils/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index b174c514c7e..b7bd5c3e282 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -17,6 +17,6 @@ else docker kill $(cat /tmp/cid_${TARGET}) if [ "X${TESTS_KEEP_CONTAINER}" = "X" ]; then - docker rm -f "${C_NAME}" + docker rm -vf "${C_NAME}" fi fi From 02e6ef2a813df0c43a4f18d8f3f5fd32b9b43360 Mon Sep 17 00:00:00 2001 From: Bill Nottingham <notting@splat.cc> Date: Tue, 22 Mar 2016 14:48:15 -0400 Subject: [PATCH 1084/1113] be brand-approved --- ROADMAP.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ROADMAP.md b/ROADMAP.md index d4982369d45..4d4b95b8dd5 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,4 +1,4 @@ -Roadmap For Ansible by RedHat +Roadmap For Ansible by Red Hat ============= This document is now the location for published Ansible Core roadmaps. From 6afed7083a99d14ec80a065cfde8ca1b1ce1efb2 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Tue, 22 Mar 2016 15:37:47 -0400 Subject: [PATCH 1085/1113] Revert "Use docker cp instead of docker volumes to allow for testing with remote docker servers" This reverts commit 18599047cd4a46260b05215e7f3c561c335fc1c7. --- test/utils/run_tests.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index b7bd5c3e282..bdfb392ff3a 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -11,8 +11,7 @@ else set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" docker pull ansible/ansible:${TARGET} - docker run -d --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} - docker cp ${PWD} $(cat /tmp/cid_${TARGET}):/root/ansible + docker run -d --volume="${PWD}:/root/ansible:Z" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} docker exec -ti $(cat /tmp/cid_${TARGET}) /bin/sh -c "export TEST_FLAGS='${TEST_FLAGS}'; cd /root/ansible; . hacking/env-setup; (cd test/integration; LC_ALL=en_US.utf-8 make)" docker kill $(cat /tmp/cid_${TARGET}) From c3548677de87d96fbd094461ea620d4506ecec4d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Tue, 22 Mar 2016 13:19:09 -0700 Subject: [PATCH 1086/1113] Add TEST_FLAGS to test_hash --- test/integration/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 48382cd4c49..c372316e20e 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -122,8 +122,8 @@ test_handlers: [ "$$(ansible-playbook test_force_handlers.yml --force-handlers --tags force_false_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. 
| sort | uniq | xargs)" = "CALLED_HANDLER_B" ] test_hash: - ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' - ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -e '{"test_hash":{"extra_args":"this is an extra arg"}}' + ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -e '{"test_hash":{"extra_args":"this is an extra arg"}}' test_var_precedence: setup ansible-playbook test_var_precedence.yml -i $(INVENTORY) $(CREDENTIALS_ARG) $(TEST_FLAGS) -v -e outputdir=$(TEST_DIR) -e 'extra_var=extra_var' -e 'extra_var_override=extra_var_override' From bb6e05a17649a4c3c2be0ec097f8a57d8e4eeaed Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 22 Mar 2016 20:33:40 -0500 Subject: [PATCH 1087/1113] Fix indentation in ovirt.py inventory --- contrib/inventory/ovirt.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py index dccbf421276..f406704ed65 100755 --- a/contrib/inventory/ovirt.py +++ b/contrib/inventory/ovirt.py @@ -172,9 +172,9 @@ class OVirtInventory(object): # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. - kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) - kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) - kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) + kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) + kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) + kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) # Retrieve and return the ovirt driver. return API(insecure=True, **kwargs) From a6d52ce098314567351c27a992c18c78804e0e05 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 22 Mar 2016 20:41:37 -0500 Subject: [PATCH 1088/1113] Ensure that any command in run_tests.sh that fails results in immediate failure --- test/utils/run_tests.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/run_tests.sh b/test/utils/run_tests.sh index bdfb392ff3a..214b1c827c5 100755 --- a/test/utils/run_tests.sh +++ b/test/utils/run_tests.sh @@ -1,5 +1,7 @@ #!/bin/sh -x +set -e + if [ "${TARGET}" = "sanity" ]; then ./test/code-smell/replace-urlopen.sh . 
./test/code-smell/use-compat-six.sh lib @@ -8,7 +10,6 @@ if [ "${TARGET}" = "sanity" ]; then if test x"$TOXENV" != x'py24' ; then tox ; fi if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi else - set -e export C_NAME="testAbull_$$_$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 8 | head -n 1)" docker pull ansible/ansible:${TARGET} docker run -d --volume="${PWD}:/root/ansible:Z" --name "${C_NAME}" ${TARGET_OPTIONS} ansible/ansible:${TARGET} > /tmp/cid_${TARGET} From ad30bad14f5bfcb04f86873748d68a9f2fe76419 Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Tue, 22 Mar 2016 21:59:03 -0500 Subject: [PATCH 1089/1113] Skip test_module_utils_basic__log_invocation until we can figure out the cause of the failure --- test/units/module_utils/basic/test__log_invocation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/units/module_utils/basic/test__log_invocation.py b/test/units/module_utils/basic/test__log_invocation.py index a08a2d84ca0..b41d0bf0b39 100644 --- a/test/units/module_utils/basic/test__log_invocation.py +++ b/test/units/module_utils/basic/test__log_invocation.py @@ -27,6 +27,7 @@ from ansible.compat.tests.mock import MagicMock class TestModuleUtilsBasic(unittest.TestCase): + @unittest.skip("Skipping due to unknown reason. See #15105") def test_module_utils_basic__log_invocation(self): from ansible.module_utils import basic From 5bb876b0e21bb7b9e5a2e07f7a4a007264bcd122 Mon Sep 17 00:00:00 2001 From: Peter Sprygada <psprygada@ansible.com> Date: Wed, 23 Mar 2016 02:32:18 -0400 Subject: [PATCH 1090/1113] fixes issue with getting value with . (dot) in key in netcfg This commit addresses a problem when attempting to retrieve a value from the result that includes a dict key using . (dot). 
--- lib/ansible/module_utils/netcfg.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/lib/ansible/module_utils/netcfg.py b/lib/ansible/module_utils/netcfg.py index 644a0a32dfa..5ccaf8d5bc3 100644 --- a/lib/ansible/module_utils/netcfg.py +++ b/lib/ansible/module_utils/netcfg.py @@ -85,9 +85,8 @@ def parse(lines, indent): class Conditional(object): - ''' - Used in command modules to evaluate waitfor conditions - ''' + """Used in command modules to evaluate waitfor conditions + """ OPERATORS = { 'eq': ['eq', '=='], @@ -133,13 +132,20 @@ class Conditional(object): raise AttributeError('unknown operator: %s' % oper) def get_value(self, result): - for key in self.key.split('.'): - match = re.match(r'^(.+)\[(\d+)\]', key) + parts = re.split(r'\.(?=[^\]]*(?:\[|$))', self.key) + for part in parts: + match = re.findall(r'\[(\S+?)\]', part) if match: - key, index = match.groups() - result = result[key][int(index)] + key = part[:part.find('[')] + result = result[key] + for m in match: + try: + m = int(m) + except ValueError: + m = str(m) + result = result[m] else: - result = result.get(key) + result = result.get(part) return result def number(self, value): From f9a9e5e1b9baefa7c98b1ba6c473650e9a2c8492 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=8F=E6=81=BA=28Xia=20Kai=29?= <xiaket@gmail.com> Date: Wed, 23 Mar 2016 07:20:27 +0000 Subject: [PATCH 1091/1113] =?UTF-8?q?complete=20the=20docstring.=20Fixed?= =?UTF-8?q?=20#14794=20Signed-off-by:=20=E5=A4=8F=E6=81=BA(Xia=20Kai)=20<x?= =?UTF-8?q?iaket@gmail.com>?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- lib/ansible/executor/task_executor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index a0881cc222b..8f8fd93f056 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -75,7 +75,9 @@ class TaskExecutor: def run(self): ''' The main executor entrypoint, where we determine if the specified - task requires looping and either runs the task with + task requires looping and either runs the task with self._run_loop() + or self._execute(). After that, the returned results are parsed and + returned as a dict. 
''' display.debug("in run()") From 197e590d7c7179048307caccb0989d3743e92fdd Mon Sep 17 00:00:00 2001 From: Matthew Gamble <git@matthewgamble.net> Date: Wed, 23 Mar 2016 19:22:18 +1100 Subject: [PATCH 1092/1113] Small comment improvement in AnsibleModule class --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index fe9647265dd..fa2306f1974 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -546,7 +546,7 @@ class AnsibleModule(object): self.aliases = self._handle_aliases() except Exception: e = get_exception() - # use exceptions here cause its not safe to call vail json until no_log is processed + # Use exceptions here because it isn't safe to call fail_json until no_log is processed print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) sys.exit(1) From 7b06ec79e32df77ea0b111f3fff7b4c4fe3195e8 Mon Sep 17 00:00:00 2001 From: Matthew Gamble <git@matthewgamble.net> Date: Wed, 23 Mar 2016 20:38:12 +1100 Subject: [PATCH 1093/1113] Add documentation for squash_actions configuration setting --- docsite/rst/intro_configuration.rst | 17 +++++++++++++++++ examples/ansible.cfg | 6 ++++++ lib/ansible/constants.py | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 2d12825ea02..b10f460db28 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -628,6 +628,23 @@ Additional paths can be provided separated by colon characters, in the same way Roles will be first searched for in the playbook directory. Should a role not be found, it will indicate all the possible paths that were searched. +.. _cfg_squash_actions: + +squash_actions +============== + +.. versionadded:: 2.0 + +Ansible can optimise actions that call modules that support list parameters when using with\_ looping. +Instead of calling the module once for each item, the module is called once with the full list. + +The default value for this setting is only for certain package managers, but it can be used for any module:: + + squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper + +Currently, this is only supported for modules that have a name parameter, and only when the item is the +only thing being passed to the parameter. + .. _cfg_strategy_plugins: strategy_plugins diff --git a/examples/ansible.cfg b/examples/ansible.cfg index fe60bc71aa0..98657cc0622 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -199,6 +199,12 @@ #retry_files_enabled = False #retry_files_save_path = ~/.ansible-retry +# squash actions +# Ansible can optimise actions that call modules with list parameters +# when looping. Instead of calling the module once per with_ item, the +# module is called once with all items at once. Currently this only works +# under limited circumstances, and only with parameters named 'name'. +#squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper # prevents logging of task data, off by default #no_log = False diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 365bda04d06..ea4f909cf54 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -203,7 +203,7 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa # the module takes both, bad things could happen. 
# In the future we should probably generalize this even further # (mapping of param: squash field) -DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True) +DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, package, pacman, pkgng, yum, zypper", islist=True) # paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True) DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True) From 2e9fc918442bcc0eb5161e1e8c179348793a1818 Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Wed, 23 Mar 2016 13:49:20 +0100 Subject: [PATCH 1094/1113] Various improvements to issue/pull-request templates - Instruct to verify if an issue/request already exists - Use uppercase for titles (easier to parse as titles) - Remove empty lines where it makes sense (instructions) - Ensure that commit-message appears directly under summary (pull-requests) --- .github/ISSUE_TEMPLATE.md | 35 ++++++++++++++------------------ .github/PULL_REQUEST_TEMPLATE.md | 19 +++++++---------- 2 files changed, 22 insertions(+), 32 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 56907da419c..f5c4e11789c 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,55 +1,50 @@ -##### Issue Type: +<!--- Verify first that your issue/request is not already reported in GitHub --> -<!--- Please pick one and delete the rest: --> +##### ISSUE TYPE +<!--- Pick one below and delete the rest: --> - Bug Report - Feature Idea - Documentation Report -##### Ansible Version: +##### ANSIBLE VERSION ``` -<!--- Paste verbatim output from “ansible --version” here --> +<!--- Paste verbatim output from “ansible --version” between quotes --> ``` -##### Ansible Configuration: - +##### CONFIGURATION <!--- -Please mention any settings you have changed/added/removed in ansible.cfg +Mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables). --> -##### Environment: - +##### OS / ENVIRONMENT <!--- -Please mention the OS you are running Ansible from, and the OS you are +Mention the OS you are running Ansible from, and the OS you are managing, or say “N/A” for anything that is not platform-specific. --> -##### Summary: - +##### SUMMARY <!--- Please explain the problem briefly --> -##### Steps To Reproduce: - +##### STEPS TO REPRODUCE <!--- For bugs, please show exactly how to reproduce the problem. For new features, show how the feature would be used. --> ``` -<!--- Paste example playbooks or commands here --> +<!--- Paste example playbooks or commands between quotes --> ``` <!--- You can also paste gist.github.com links for larger files --> -##### Expected Results: - +##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> -##### Actual Results: - +##### ACTUAL RESULTS <!--- What actually happened? 
If possible run with high verbosity (-vvvv) --> ``` -<!--- Paste verbatim command output here --> +<!--- Paste verbatim command output between quotes --> ``` diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3c348908ed3..b9a19185306 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,20 +1,17 @@ -##### Issue Type: - -<!--- Please pick one and delete the rest: --> +##### ISSUE TYPE +<!--- Pick one below and delete the rest: --> - Feature Pull Request - New Module Pull Request - Bugfix Pull Request - Docs Pull Request -##### Ansible Version: - +##### ANSIBLE VERSION ``` -<!--- Paste verbatim output from “ansible --version” here --> +<!--- Paste verbatim output from “ansible --version” between quotes --> ``` -##### Summary: - -<!--- Please describe the change and the reason for it --> +##### SUMMARY +<!--- Describe the change, including rationale and design decisions --> <!--- If you are fixing an existing issue, please include "Fixes #nnn" in your @@ -22,8 +19,6 @@ commit message and your description; but you should still explain what the change does. --> -##### Example output: - ``` -<!-- Paste verbatim command output here if necessary --> +<!-- Paste verbatim command output here, e.g. before and after your change --> ``` From e4701b12eedaa04fdce0d3746b2f91b445d958df Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Wed, 23 Mar 2016 14:00:55 +0100 Subject: [PATCH 1095/1113] One more improvement --- .github/ISSUE_TEMPLATE.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index f5c4e11789c..eb521195646 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -25,12 +25,12 @@ managing, or say “N/A” for anything that is not platform-specific. --> ##### SUMMARY -<!--- Please explain the problem briefly --> +<!--- Explain the problem briefly --> ##### STEPS TO REPRODUCE <!--- -For bugs, please show exactly how to reproduce the problem. For new -features, show how the feature would be used. +For bugs, show exactly how to reproduce the problem. +For new features, show how the feature would be used. --> ``` From a4713db3b41269a906e60b09eb841010929d20bd Mon Sep 17 00:00:00 2001 From: Dag Wieers <dag@wieers.com> Date: Wed, 23 Mar 2016 15:32:29 +0100 Subject: [PATCH 1096/1113] Update CONTRIBUTING.md with more recent developments The file no longer reflects the new way of working (but was referenced at the top of the "new issue" interface). --- CONTRIBUTING.md | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 58f495cd533..2860c3ca7fc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,27 +1,29 @@ -Welcome To Ansible GitHub -========================= +# WELCOME TO ANSIBLE GITHUB Hi! Nice to see you here! -If you'd like to ask a question -=============================== -Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. +## QUESTIONS ? -The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. 
+Please see the [community page](http://docs.ansible.com/community.html) for information on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. -If you'd like to contribute code -================================ +The GitHub issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, as the community page explains best. -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. -If you'd like to file a bug -=========================== +## CONTRIBUTING ? -I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. +Please see the [community page](http://docs.ansible.com/community.html) for information regarding the contribution process. Important license agreement information is also included on that page. -Also please make sure you are testing on the latest released version of Ansible or the development branch. + +## BUG TO REPORT ? + +First and foremost, also check the [community page](http://docs.ansible.com/community.html). + +You can report bugs or make enhancement requests at the [Ansible GitHub issue page](http://github.com/ansible/ansible/issues/new) by filling out the issue template that will be presented. + +Also please make sure you are testing on the latest released version of Ansible or the development branch. You can find the latest releases and development branch at: + +- https://github.com/ansible/ansible/releases +- https://github.com/ansible/ansible/archive/devel.tar.gz Thanks! 
- - From 21e70bbd83f629e90d8a2763fd450ec5cd7291eb Mon Sep 17 00:00:00 2001 From: David Shrewsbury <shrewsbury.dave@gmail.com> Date: Wed, 23 Mar 2016 10:58:03 -0400 Subject: [PATCH 1097/1113] Update CHANGELOG for new OpenStack modules --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f287ca30164..9405e0b115f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,11 @@ Ansible Changes By Release * cs_volume * cs_zone * cs_zone_facts +- openstack + * os_flavor_facts + * os_group + * os_ironic_inspect + * os_keystone_role - windows * win_regmerge * win_timezone From 917da92e2b4e1edd52928b4dcceea84de72c0b71 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 23 Mar 2016 08:00:33 -0700 Subject: [PATCH 1098/1113] Get rid of extra trailing space in log messages --- lib/ansible/module_utils/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index fa2306f1974..06f4727e8a5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1497,9 +1497,9 @@ class AnsibleModule(object): arg_val = str(arg_val) elif isinstance(arg_val, unicode): arg_val = arg_val.encode('utf-8') - msg.append('%s=%s ' % (arg, arg_val)) + msg.append('%s=%s' % (arg, arg_val)) if msg: - msg = 'Invoked with %s' % ''.join(msg) + msg = 'Invoked with %s' % ' '.join(msg) else: msg = 'Invoked' From d3583108eca98f72b9b4898a5cc5e9cf1cacf251 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 23 Mar 2016 08:00:56 -0700 Subject: [PATCH 1099/1113] Fix log_invocation test on python2 with hash randomization --- .../basic/test__log_invocation.py | 42 +++++++++++++------ 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/test/units/module_utils/basic/test__log_invocation.py b/test/units/module_utils/basic/test__log_invocation.py index b41d0bf0b39..f07b00e99b5 100644 --- a/test/units/module_utils/basic/test__log_invocation.py +++ b/test/units/module_utils/basic/test__log_invocation.py @@ -20,14 +20,14 @@ from __future__ import (absolute_import, division) __metaclass__ = type +import sys import json from ansible.compat.tests import unittest from ansible.compat.tests.mock import MagicMock class TestModuleUtilsBasic(unittest.TestCase): - - @unittest.skip("Skipping due to unknown reason. See #15105") + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_module_utils_basic__log_invocation(self): from ansible.module_utils import basic @@ -46,14 +46,30 @@ class TestModuleUtilsBasic(unittest.TestCase): am.log = MagicMock() am._log_invocation() - am.log.assert_called_with( - 'Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD ', - log_args={ - 'foo': 'False', - 'bar': '[1, 2, 3]', - 'bam': 'bam', - 'baz': 'baz', - 'password': 'NOT_LOGGING_PASSWORD', - 'no_log': 'NOT_LOGGING_PARAMETER', - }, - ) + + # Message is generated from a dict so it will be in an unknown order. 
+ # have to check this manually rather than with assert_called_with() + args = am.log.call_args[0] + self.assertEqual(len(args), 1) + message = args[0] + + self.assertEqual(len(message), len('Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD')) + self.assertTrue(message.startswith('Invoked with ')) + self.assertIn(' bam=bam', message) + self.assertIn(' bar=[1, 2, 3]', message) + self.assertIn(' foo=False', message) + self.assertIn(' baz=baz', message) + self.assertIn(' no_log=NOT_LOGGING_PARAMETER', message) + self.assertIn(' password=NOT_LOGGING_PASSWORD', message) + + kwargs = am.log.call_args[1] + self.assertEqual(kwargs, + dict(log_args={ + 'foo': 'False', + 'bar': '[1, 2, 3]', + 'bam': 'bam', + 'baz': 'baz', + 'password': 'NOT_LOGGING_PASSWORD', + 'no_log': 'NOT_LOGGING_PARAMETER', + }) + ) From 728bb7d85ea13ba7e9e7a4a539cdeca22f2d43d5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 23 Mar 2016 08:32:42 -0700 Subject: [PATCH 1100/1113] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index f15000d7d43..7efc09ef083 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit f15000d7d435ccc85e7dce9f8f92d32026be9ed6 +Subproject commit 7efc09ef08344e56de5b953c0ac5f423895d83c4 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index aa80e700e5d..7f9cdc0350a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit aa80e700e5da8e7a65347cf4afbedfb75d50ae84 +Subproject commit 7f9cdc0350af7adf6ac57805b660ba1e4ea53c6a From 1802e09b08a419ff317e74d1fd5a38dea70d906a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Wed, 23 Mar 2016 08:49:37 -0700 Subject: [PATCH 1101/1113] Exclude .tox from paths scanned for urlopen --- test/code-smell/replace-urlopen.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/code-smell/replace-urlopen.sh b/test/code-smell/replace-urlopen.sh index 404caf30980..428d2319fc1 100755 --- a/test/code-smell/replace-urlopen.sh +++ b/test/code-smell/replace-urlopen.sh @@ -3,7 +3,7 @@ BASEDIR=${1-"."} URLLIB_USERS=$(find "$BASEDIR" -name '*.py' -exec grep -H urlopen \{\} \;) -URLLIB_USERS=$(echo "$URLLIB_USERS" | sed '/\(\n\|lib\/ansible\/module_utils\/urls.py\|lib\/ansible\/compat\/six\/_six.py\)/d') +URLLIB_USERS=$(echo "$URLLIB_USERS" | sed '/\(\n\|lib\/ansible\/module_utils\/urls.py\|lib\/ansible\/compat\/six\/_six.py\|.tox\)/d') if test -n "$URLLIB_USERS" ; then printf "$URLLIB_USERS" exit 1 From fdb56e4bad9384c1b8028723216c10ddb45a3da2 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Sun, 20 Mar 2016 12:55:48 -0700 Subject: [PATCH 1102/1113] avoid bad path entries --- lib/ansible/module_utils/basic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 06f4727e8a5..e57b1943aab 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1546,6 +1546,8 @@ class AnsibleModule(object): if p not in paths and os.path.exists(p): paths.append(p) for d in paths: + if not d: + continue path = os.path.join(d, arg) if os.path.exists(path) and is_executable(path): bin_path = path From 0cabef19ad14d1c4b63da2acd17c2e5ff1d2e7f5 Mon Sep 17 00:00:00 2001 
From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 23 Mar 2016 09:29:38 -0700 Subject: [PATCH 1103/1113] updated man pages with imssing become option also moved -K to become password to align with current code fixes #15123 --- docs/man/man1/ansible-playbook.1.asciidoc.in | 9 +++++++-- docs/man/man1/ansible-pull.1.asciidoc.in | 9 +++++++-- docs/man/man1/ansible.1.asciidoc.in | 9 +++++++-- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index d34ddbc2dac..47e68d31418 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -34,7 +34,12 @@ The names of one or more YAML format files to run as ansible playbooks. OPTIONS ------- -*--ask-become-pass*:: +*-b*, *--become*:: + +Use privilege escalation (specific one depends on become_method), +this does not imply prompting for passwords. + +*-K*, *--ask-become-pass*:: Ask for privilege escalation password. @@ -47,7 +52,7 @@ For example, using ssh and not having a key-based authentication with ssh-agent Prompt for su password, used with --su (deprecated, use become). -*-K*, *--ask-sudo-pass*:: +*--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 0afba2aeaac..9f4e1993f31 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -54,7 +54,12 @@ OPTIONS Adds the hostkey for the repo URL if not already added. -*--ask-become-pass*:: +*-b*, *--become*:: + +Use privilege escalation (specific one depends on become_method), +this does not imply prompting for passwords. + +*-K*, *--ask-become-pass*:: Ask for privilege escalation password. @@ -67,7 +72,7 @@ For example, using ssh and not having a key-based authentication with ssh-agent Prompt for su password, used with --su (deprecated, use become). -*-K*, *--ask-sudo-pass*:: +*--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in index 191b9be8aa6..92b7e826bb5 100644 --- a/docs/man/man1/ansible.1.asciidoc.in +++ b/docs/man/man1/ansible.1.asciidoc.in @@ -37,7 +37,12 @@ OPTIONS The 'ARGUMENTS' to pass to the module. -*--ask-become-pass*:: +*-b*, *--become*:: + +Use privilege escalation (specific one depends on become_method), +this does not imply prompting for passwords. + +*K*, *--ask-become-pass*:: Ask for privilege escalation password. @@ -50,7 +55,7 @@ For example, using ssh and not having a key-based authentication with ssh-agent Prompt for su password, used with --su (deprecated, use become). -*-K*, *--ask-sudo-pass*:: +*--ask-sudo-pass*:: Prompt for the password to use with --sudo, if any (deprecated, use become). 
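
Editorial note: the man-page patch above documents the `-b`/`--become` and `-K`/`--ask-become-pass` pairing but does not show it in use. The minimal sketch below (an illustration added for this write-up, not part of the patch series) exercises that pairing against the same `CLI.base_parser` helper the unit tests earlier in this series rely on. It assumes an Ansible 2.x source tree on `PYTHONPATH`; the attribute names `become` and `become_ask_pass` on the parsed options object are assumptions based on the CLI code of that era, not taken from these patches.

```
# Sketch: confirm that -b enables privilege escalation and -K only asks for
# the escalation password, matching the updated man-page text above.
# Assumption: the parsed options expose 'become' and 'become_ask_pass';
# adjust the attribute names if your checkout differs.
from ansible.cli import CLI

parser = CLI.base_parser(runas_opts=True, connect_opts=True, check_opts=True)
(options, args) = parser.parse_args(['-b', '-K'])

print(options.become)           # expected: True  (escalate, no prompt implied)
print(options.become_ask_pass)  # expected: True  (prompt for escalation password)
```

Note that `-b` alone does not prompt for a password, which is exactly the distinction the asciidoc changes above draw between the two flags.
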
From 52e9209491dee6a0c63edaa770b8601092248283 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Mon, 21 Mar 2016 14:17:53 -0700 Subject: [PATCH 1104/1113] Don't create world-readable module and tempfiles without explicit user permission --- docsite/rst/become.rst | 59 +++++++++++---- docsite/rst/intro_configuration.rst | 2 +- examples/ansible.cfg | 8 ++ lib/ansible/constants.py | 1 + lib/ansible/plugins/action/__init__.py | 93 ++++++++++++++++++++---- lib/ansible/plugins/action/assemble.py | 10 +-- lib/ansible/plugins/action/async.py | 17 ++++- lib/ansible/plugins/action/copy.py | 12 +-- lib/ansible/plugins/action/patch.py | 9 +-- lib/ansible/plugins/action/script.py | 12 +-- lib/ansible/plugins/action/template.py | 6 +- lib/ansible/plugins/action/unarchive.py | 15 ++-- lib/ansible/plugins/shell/__init__.py | 35 ++++++++- test/units/plugins/action/test_action.py | 16 ++-- 14 files changed, 217 insertions(+), 78 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index f7082d1d111..e72e40c7193 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -85,25 +85,33 @@ on how it works. Users should be aware of these to avoid surprises. Becoming an Unprivileged User ============================= -Ansible has a limitation with regards to becoming an +Ansible 2.0.x and below has a limitation with regards to becoming an unprivileged user that can be a security risk if users are not aware of it. Ansible modules are executed on the remote machine by first substituting the parameters into the module file, then copying the file to the remote machine, -and finally executing it there. If the module file is executed without using -become, when the become user is root, or when the connection to the remote -machine is made as root then the module file is created with permissions that -only allow reading by the user and root. +and finally executing it there. -If the become user is an unprivileged user and then Ansible has no choice but -to make the module file world readable as there's no other way for the user -Ansible connects as to save the file so that the user that we're becoming can -read it. +Everything is fine if the module file is executed without using ``become``, +when the ``become_user`` is root, or when the connection to the remote machine +is made as root. In these cases the module file is created with permissions +that only allow reading by the user and root. + +The problem occurs when the ``become_user`` is an unprivileged user. Ansible +2.0.x and below make the module file world readable in this case as the module +file is written as the user that Ansible connects as but the file needs to +be reasable by the user Ansible is set to ``become``. + +.. note:: In Ansible 2.1, this window is further narrowed: If the connection + is made as a privileged user (root) then Ansible 2.1 and above will use + chown to set the file's owner to the unprivileged user being switched to. + This means both the user making the connection and the user being switched + to via ``become`` must be unprivileged in order to trigger this problem. If any of the parameters passed to the module are sensitive in nature then -those pieces of data are readable by reading the module file for the duration -of the Ansible module execution. Once the module is done executing Ansible -will delete the temporary file. If you trust the client machines then there's -no problem here. 
If you do not trust the client machines then this is +those pieces of data are located in a world readable module file for the +duration of the Ansible module execution. Once the module is done executing +Ansible will delete the temporary file. If you trust the client machines then +there's no problem here. If you do not trust the client machines then this is a potential danger. Ways to resolve this include: @@ -113,9 +121,32 @@ Ways to resolve this include: the remote python interpreter's stdin. Pipelining does not work for non-python modules. +* (Available in Ansible 2.1) Install filesystem acl support on the managed + host. If the temporary directory on the remote host is mounted with + filesystem acls enabled and the :command:`setfacl` tool is in the remote + ``PATH`` then Ansible will use filesystem acls to share the module file with + the second unprivileged instead of having to make the file readable by + everyone. + * Don't perform an action on the remote machine by becoming an unprivileged user. Temporary files are protected by UNIX file permissions when you - become root or do not use become. + ``become`` root or do not use ``become``. In Ansible 2.1 and above, UNIX + file permissions are also secure if you make the connection to the managed + machine as root and then use ``become`` to an unprivileged account. + +.. versionchanged:: 2.1 + +In addition to the additional means of doing this securely, Ansible 2.1 also +makes it harder to unknowingly do this insecurely. Whereas in Ansible 2.0.x +and below, Ansible will silently allow the insecure behaviour if it was unable +to find another way to share the files with the unprivileged user, in Ansible +2.1 and above Ansible defaults to issuing an error if it can't do this +securely. If you can't make any of the changes above to resolve the problem +and you decide that the machine you're running on is secure enough for the +modules you want to run there to be world readable you can turn on +``allow_world_readable_tmpfiles`` in the :file:`ansible.cfg` file. Setting +``allow_world_readable_tmpfiles`` will change this from an error into +a warning and allow the task to run as it did prior to 2.1. Connection Plugin Support ========================= diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index b10f460db28..1ca71e210e0 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -60,7 +60,7 @@ General defaults In the [defaults] section of ansible.cfg, the following settings are tunable: -.. _action_plugins: +.. _cfg_action_plugins: action_plugins ============== diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 98657cc0622..19913af9aa8 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -212,6 +212,14 @@ # prevents logging of tasks, but only on the targets, data is still logged on the master/controller #no_target_syslog = False +# controls whether Ansible will raise an error or warning if a task has no +# choice but to create world readable temporary files to execute a module on +# the remote machine. This option is False by default for security. Users may +# turn this on to have behaviour more like Ansible prior to 2.1.x. See +# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user +# for more secure ways to fix this than enabling this option. +#allow_world_readable_tmpfiles = False + # controls the compression level of variables sent to # worker processes. At the default of 0, no compression # is used. 
This value must be an integer from 0 to 9. diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index ea4f909cf54..1a9cbbce739 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -165,6 +165,7 @@ DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', # disclosure DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True) DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True) +ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 8666329ffff..2ba0650de39 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -192,7 +192,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): return True return False - def _make_tmp_path(self): + def _make_tmp_path(self, remote_user): ''' Create and return a temporary path on a remote box. ''' @@ -200,12 +200,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) use_system_tmp = False - if self._play_context.become and self._play_context.become_user != 'root': + if self._play_context.become and self._play_context.become_user not in ('root', remote_user): use_system_tmp = True - tmp_mode = None - if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root': - tmp_mode = 0o755 + tmp_mode = 0o700 cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) result = self._low_level_execute_command(cmd, sudoable=False) @@ -255,6 +253,10 @@ class ActionBase(with_metaclass(ABCMeta, object)): # If ssh breaks we could leave tmp directories out on the remote system. self._low_level_execute_command(cmd, sudoable=False) + def _transfer_file(self, local_path, remote_path): + self._connection.put_file(local_path, remote_path) + return remote_path + def _transfer_data(self, remote_path, data): ''' Copies the module data out to the temporary module path. @@ -269,25 +271,85 @@ class ActionBase(with_metaclass(ABCMeta, object)): data = to_bytes(data, errors='strict') afo.write(data) except Exception as e: - #raise AnsibleError("failure encoding into utf-8: %s" % str(e)) raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e)) afo.flush() afo.close() try: - self._connection.put_file(afile, remote_path) + self._transfer_file(afile, remote_path) finally: os.unlink(afile) return remote_path - def _remote_chmod(self, mode, path, sudoable=False): + def _fixup_perms(self, remote_path, remote_user, execute=False, recursive=True): + """ + If the become_user is unprivileged and different from the + remote_user then we need to make the files we've uploaded readable by them. + """ + if remote_path is None: + # Sometimes code calls us naively -- it has a var which could + # contain a path to a tmp dir but doesn't know if it needs to + # exist or not. If there's no path, then there's no need for us + # to do work + self._display.debug('_fixup_perms called with remote_path==None. 
Sure this is correct?') + return remote_path + + if self._play_context.become and self._play_context.become_user not in ('root', remote_user): + # Unprivileged user that's different than the ssh user. Let's get + # to work! + if remote_user == 'root': + # SSh'ing as root, therefore we can chown + self._remote_chown(remote_path, self._play_context.become_user, recursive=recursive) + if execute: + # root can read things that don't have read bit but can't + # execute them. + self._remote_chmod('u+x', remote_path, recursive=recursive) + else: + if execute: + mode = 'rx' + else: + mode = 'rX' + # Try to use fs acls to solve this problem + res = self._remote_set_user_facl(remote_path, self._play_context.become_user, mode, recursive=recursive, sudoable=False) + if res['rc'] != 0: + if C.ALLOW_WORLD_READABLE_TMPFILES: + # fs acls failed -- do things this insecure way only + # if the user opted in in the config file + self._display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user') + self._remote_chmod('a+%s' % mode, remote_path, recursive=recursive) + else: + raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user. For information on working around this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user') + elif execute: + # Can't depend on the file being transferred with execute + # permissions. Only need user perms because no become was + # used here + self._remote_chmod('u+x', remote_path, recursive=recursive) + + return remote_path + + def _remote_chmod(self, mode, path, recursive=True, sudoable=False): ''' Issue a remote chmod command ''' + cmd = self._connection._shell.chmod(mode, path, recursive=recursive) + res = self._low_level_execute_command(cmd, sudoable=sudoable) + return res - cmd = self._connection._shell.chmod(mode, path) + def _remote_chown(self, path, user, group=None, recursive=True, sudoable=False): + ''' + Issue a remote chown command + ''' + cmd = self._connection._shell.chown(path, user, group, recursive=recursive) + res = self._low_level_execute_command(cmd, sudoable=sudoable) + return res + + def _remote_set_user_facl(self, path, user, mode, recursive=True, sudoable=False): + ''' + Issue a remote call to setfacl + ''' + cmd = self._connection._shell.set_user_facl(path, user, mode, recursive=recursive) res = self._low_level_execute_command(cmd, sudoable=sudoable) return res @@ -417,6 +479,9 @@ class ActionBase(with_metaclass(ABCMeta, object)): else: module_args['_ansible_check_mode'] = False + # Get the connection user for permission checks + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user + # set no log in the module arguments, if required module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG @@ -437,7 +502,7 @@ class ActionBase(with_metaclass(ABCMeta, object)): remote_module_path = None args_file_path = None if not tmp and self._late_needs_tmp_path(tmp, module_style): - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) if tmp: remote_module_filename = self._connection._shell.get_remote_filename(module_name) @@ -462,11 +527,9 @@ class ActionBase(with_metaclass(ABCMeta, object)): environment_string = self._compute_environment_string() - if tmp and "tmp" in tmp and 
self._play_context.become and self._play_context.become_user != 'root': - # deal with possible umask issues once sudo'ed to other user - self._remote_chmod('a+r', remote_module_path) - if args_file_path is not None: - self._remote_chmod('a+r', args_file_path) + # Fix permissions of the tmp path and tmp files. This should be + # called after all files have been transferred. + self._fixup_perms(tmp, remote_user, recursive=True) cmd = "" in_data = None diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index eeb13c21ae9..9c7e7091225 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -98,8 +98,9 @@ class ActionModule(ActionBase): return result cleanup_remote_tmp = False + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) cleanup_remote_tmp = True if boolean(remote_src): @@ -146,16 +147,15 @@ class ActionModule(ActionBase): ) if path_checksum != dest_stat['checksum']: - resultant = file(path).read() if self._play_context.diff: diff = self._get_diff_data(dest, path, task_vars) - xfered = self._transfer_data('src', resultant) + remote_path = self._connection._shell.join_path(tmp, 'src') + xfered = self._transfer_file(path, remote_path) # fix file permissions when the copy is done as a different user - if self._play_context.become and self._play_context.become_user != 'root': - self._remote_chmod('a+r', xfered) + self._fixup_perms(tmp, remote_user, recursive=True) new_module_args.update( dict( src=xfered,)) diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 8a7175aeb86..b85f1c20f46 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -38,8 +38,9 @@ class ActionModule(ActionBase): result['msg'] = 'check mode not supported for this module' return result + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) module_name = self._task.action async_module_path = self._connection._shell.join_path(tmp, 'async_wrapper') @@ -54,15 +55,25 @@ class ActionModule(ActionBase): # configure, upload, and chmod the target module (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars) self._transfer_data(remote_module_path, module_data) - self._remote_chmod('a+rx', remote_module_path) # configure, upload, and chmod the async_wrapper module (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars) self._transfer_data(async_module_path, async_module_data) - self._remote_chmod('a+rx', async_module_path) argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(module_args)) + self._fixup_perms(tmp, remote_user, execute=True, recursive=True) + # Only the following two files need to be executable but we'd have to + # make three remote calls if we wanted to just set them executable. 
+ # There's not really a problem with marking too many of the temp files + # executable so we go ahead and mark them all as executable in the + # line above (the line above is needed in any case [although + # execute=False is okay if we uncomment the lines below] so that all + # the files are readable in case the remote_user and become_user are + # different and both unprivileged) + #self._fixup_perms(remote_module_path, remote_user, execute=True, recursive=False) + #self._fixup_perms(async_module_path, remote_user, execute=True, recursive=False) + async_limit = self._task.async async_jid = str(random.randint(0, 999999999999)) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index b8094b2bd61..9734a294446 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -141,9 +141,10 @@ class ActionModule(ActionBase): delete_remote_tmp = (len(source_files) == 1) # If this is a recursive action create a tmp path that we can share as the _exec_module create is too late. + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not delete_remote_tmp: if tmp is None or "-tmp-" not in tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) # expand any user home dir specifier dest = self._remote_expand_user(dest) @@ -196,7 +197,7 @@ class ActionModule(ActionBase): # If this is recursive we already have a tmp path. if delete_remote_tmp: if tmp is None or "-tmp-" not in tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) if self._play_context.diff and not raw: diffs.append(self._get_diff_data(dest_file, source_full, task_vars)) @@ -211,16 +212,15 @@ class ActionModule(ActionBase): tmp_src = self._connection._shell.join_path(tmp, 'source') if not raw: - self._connection.put_file(source_full, tmp_src) + self._transfer_file(source_full, tmp_src) else: - self._connection.put_file(source_full, dest_file) + self._transfer_file(source_full, dest_file) # We have copied the file remotely and no longer require our content_tempfile self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if self._play_context.become and self._play_context.become_user != 'root': - self._remote_chmod('a+r', tmp_src) + self._fixup_perms(tmp, remote_user, recursive=True) if raw: # Continue to next iteration if raw is defined. 
diff --git a/lib/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py index c08f901cf58..4fbc69b66a9 100644 --- a/lib/ansible/plugins/action/patch.py +++ b/lib/ansible/plugins/action/patch.py @@ -34,6 +34,7 @@ class ActionModule(ActionBase): src = self._task.args.get('src', None) remote_src = boolean(self._task.args.get('remote_src', 'no')) + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if src is None: result['failed'] = True @@ -52,14 +53,12 @@ class ActionModule(ActionBase): # create the remote tmp dir if needed, and put the source file there if tmp is None or "-tmp-" not in tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src)) - self._connection.put_file(src, tmp_src) + self._transfer_file(src, tmp_src) - if self._play_context.become and self._play_context.become_user != 'root': - if not self._play_context.check_mode: - self._remote_chmod('a+r', tmp_src) + self._fixup_perms(tmp, remote_user, recursive=True) new_module_args = self._task.args.copy() new_module_args.update( diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index 5b0f324dfcf..a0d66405486 100644 --- a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -38,8 +38,9 @@ class ActionModule(ActionBase): result['msg'] = 'check mode not supported for this module' return result + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) creates = self._task.args.get('creates') if creates: @@ -76,16 +77,11 @@ class ActionModule(ActionBase): # transfer the file to a remote tmp location tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source)) - self._connection.put_file(source, tmp_src) + self._transfer_file(source, tmp_src) sudoable = True # set file permissions, more permissive when the copy is done as a different user - if self._play_context.become and self._play_context.become_user != 'root': - chmod_mode = 'a+rx' - sudoable = False - else: - chmod_mode = '+rx' - self._remote_chmod(chmod_mode, tmp_src, sudoable=sudoable) + self._fixup_perms(tmp, remote_user, execute=True, recursive=True) # add preparation steps to one ssh roundtrip executing the script env_string = self._compute_environment_string() diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 5ddd624bde2..66320aaf293 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -138,8 +138,9 @@ class ActionModule(ActionBase): return result cleanup_remote_tmp = False + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) cleanup_remote_tmp = True local_checksum = checksum_s(resultant) @@ -163,8 +164,7 @@ class ActionModule(ActionBase): xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user - if self._play_context.become and self._play_context.become_user != 'root': - self._remote_chmod('a+r', xfered) + self._fixup_perms(tmp, remote_user, recursive=True) # run the copy module new_module_args.update( diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index b6c43a3c595..6ace6f99758 100644 --- 
a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -45,8 +45,9 @@ class ActionModule(ActionBase): result['msg'] = "src (or content) and dest are required" return result + remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user if not tmp: - tmp = self._make_tmp_path() + tmp = self._make_tmp_path(remote_user) if creates: # do not run the command if the line contains creates=filename @@ -80,17 +81,15 @@ class ActionModule(ActionBase): if copy: # transfer the file to a remote tmp location - tmp_src = tmp + 'source' - self._connection.put_file(source, tmp_src) + tmp_src = self._connection._shell.join_path(tmp, 'source') + self._transfer_file(source, tmp_src) # handle diff mode client side # handle check mode client side - # fix file permissions when the copy is done as a different user - if copy: - if self._play_context.become and self._play_context.become_user != 'root': - if not self._play_context.check_mode: - self._remote_chmod('a+r', tmp_src) + if copy: + # fix file permissions when the copy is done as a different user + self._fixup_perms(tmp, remote_user, recursive=True) # Build temporary module_args. new_module_args = self._task.args.copy() new_module_args.update( diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py index 50530c6b691..daa011e3da0 100644 --- a/lib/ansible/plugins/shell/__init__.py +++ b/lib/ansible/plugins/shell/__init__.py @@ -52,9 +52,40 @@ class ShellBase(object): def path_has_trailing_slash(self, path): return path.endswith('/') - def chmod(self, mode, path): + def chmod(self, mode, path, recursive=True): path = pipes.quote(path) - return 'chmod %s %s' % (mode, path) + cmd = ['chmod', mode, path] + if recursive: + cmd.append('-R') + return ' '.join(cmd) + + def chown(self, path, user, group=None, recursive=True): + path = pipes.quote(path) + user = pipes.quote(user) + + if group is None: + cmd = ['chown', user, path] + else: + group = pipes.quote(group) + cmd = ['chown', '%s:%s' % (user, group), path] + + if recursive: + cmd.append('-R') + + return ' '.join(cmd) + + def set_user_facl(self, path, user, mode, recursive=True): + """Only sets acls for users as that's really all we need""" + path = pipes.quote(path) + mode = pipes.quote(mode) + user = pipes.quote(user) + + cmd = ['setfacl'] + if recursive: + cmd.append('-R') + cmd.extend(('-m', 'u:%s:%s %s' % (user, mode, path))) + + return ' '.join(cmd) def remove(self, path, recurse=False): path = pipes.quote(path) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 4bb151f090f..8c97bf04155 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -392,27 +392,27 @@ class TestActionBase(unittest.TestCase): action_base._low_level_execute_command = MagicMock() action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path') - self.assertEqual(action_base._make_tmp_path(), '/some/path/') + self.assertEqual(action_base._make_tmp_path('root'), '/some/path/') # empty path fails action_base._low_level_execute_command.return_value = dict(rc=0, stdout='') - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') # authentication failure action_base._low_level_execute_command.return_value = dict(rc=5, stdout='') - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 
'root') # ssh error action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='') - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') play_context.verbosity = 5 - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') # general error action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='') - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device') - self.assertRaises(AnsibleError, action_base._make_tmp_path) + self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root') def test_action_base__remove_tmp_path(self): # create our fake task @@ -567,8 +567,8 @@ class TestActionBase(unittest.TestCase): action_base._make_tmp_path = MagicMock() action_base._transfer_data = MagicMock() action_base._compute_environment_string = MagicMock() - action_base._remote_chmod = MagicMock() action_base._low_level_execute_command = MagicMock() + action_base._fixup_perms = MagicMock() action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data') action_base._late_needs_tmp_path.return_value = False From a083c4f5a7e4f3d8105f270a1a40dfffff99fa4e Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg <greg.dekoenigsberg@gmail.com> Date: Wed, 23 Mar 2016 12:59:23 -0400 Subject: [PATCH 1105/1113] Add meeting info to Ansible community page --- docsite/rst/community.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 97ca9a866c0..66543c6245b 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -227,6 +227,12 @@ which is our official conference series. To subscribe to a group from a non-google account, you can send an email to the subscription address requesting the subscription. For example: ansible-devel+subscribe@googlegroups.com +IRC Meetings +------------ + +The Ansible community holds regular IRC meetings on various topics, and anyone who is interested is invited to +participate. For more information about Ansible meetings, consult the [Ansible community meeting page](https://github.com/ansible/community/blob/master/MEETINGS.md). + Release Numbering ----------------- From bb45e23e5b3e80b45ae699ccdd4000063e9af4d9 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 23 Mar 2016 12:11:57 -0700 Subject: [PATCH 1106/1113] added sofltayer vm module to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9405e0b115f..8fb7e76a6c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,8 @@ Ansible Changes By Release * os_group * os_ironic_inspect * os_keystone_role +- softlayer + * sl_vm - windows * win_regmerge * win_timezone From 943e4d37f512b8b28fe828e4e08289e94e025d18 Mon Sep 17 00:00:00 2001 From: Matt Clay <matt@mystile.com> Date: Sun, 20 Mar 2016 16:24:01 -0700 Subject: [PATCH 1107/1113] Run more connection tests in Docker. Connection tests are now run before non_destructive to make sure that the known_hosts file in our Docker image has not been removed. 
--- test/integration/Makefile | 14 +++++++++++--- test/integration/test_connection.inventory | 10 ++++++---- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index c372316e20e..f12cc03a0f1 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -23,7 +23,7 @@ VAULT_PASSWORD_FILE = vault-password CONSUL_RUNNING := $(shell python consul_running.py) EUID := $(shell id -u -r) -all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_connection test_gathering_facts +all: setup test_test_infra parsing test_var_precedence unicode test_templating_settings environment test_connection non_destructive destructive includes blocks pull check_mode test_hash test_handlers test_group_by test_vault test_tags test_lookup_paths no_log test_gathering_facts test_test_infra: # ensure fail/assert work locally and can stop execution with non-zero exit code @@ -82,13 +82,21 @@ environment: setup non_destructive: setup ansible-playbook non_destructive.yml -i $(INVENTORY) -e outputdir=$(TEST_DIR) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +# For our Docker images, which identify themselves with "ENV container=docker", use the test_docker inventory group. +# Otherwise use the test_default inventory group, which runs fewer tests, but should work on any system. +ifeq ($(container),docker) +TEST_CONNECTION_FILTER := 'test_docker' +else +TEST_CONNECTION_FILTER := 'test_default' +endif + # Skip connection plugins which require root when not running as root. ifneq ($(EUID),0) -TEST_CONNECTION_FILTER := !chroot +TEST_CONNECTION_FILTER += !chroot endif # Connection plugin test command to repeat with each locale setting. 
-TEST_CONNECTION_CMD = $(1) ansible-playbook test_connection.yml -i test_connection.inventory -l '!skip-during-build $(TEST_CONNECTION_FILTER)' $(TEST_FLAGS) +TEST_CONNECTION_CMD = $(1) ansible-playbook test_connection.yml -i test_connection.inventory -l '$(TEST_CONNECTION_FILTER)' $(TEST_FLAGS) test_connection: setup $(call TEST_CONNECTION_CMD) diff --git a/test/integration/test_connection.inventory b/test/integration/test_connection.inventory index 261bd7020f8..9e3ab602cbb 100644 --- a/test/integration/test_connection.inventory +++ b/test/integration/test_connection.inventory @@ -48,9 +48,11 @@ paramiko_ssh-no-pipelining ansible_ssh_pipelining=false ansible_host=localhost ansible_connection=paramiko_ssh -[skip-during-build:children] -docker -libvirt_lxc -jail +[test_default:children] +local +chroot + +[test_docker:children] +test_default ssh paramiko_ssh From 5b11494437e0bcb888f81552ce6fa5885a880c50 Mon Sep 17 00:00:00 2001 From: Brian Coca <brian.coca+git@gmail.com> Date: Wed, 16 Mar 2016 14:12:48 -0700 Subject: [PATCH 1108/1113] python3 compatiblity remove use of basestring deal with configparser --- lib/ansible/executor/playbook_executor.py | 5 +++-- lib/ansible/executor/task_executor.py | 5 ++++- lib/ansible/executor/task_queue_manager.py | 3 ++- lib/ansible/plugins/callback/logentries.py | 5 +++-- lib/ansible/plugins/connection/local.py | 2 +- 5 files changed, 13 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 31d62984508..5a1b76f9410 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -168,8 +168,9 @@ class PlaybookExecutor: # send the stats callback for this playbook if self._tqm is not None: if C.RETRY_FILES_ENABLED: - retries = list(set(self._tqm._failed_hosts.keys() + self._tqm._unreachable_hosts.keys())) - retries.sort() + retries = set(self._tqm._failed_hosts.keys()) + retries.update(self._tqm._unreachable_hosts.keys()) + retries = sorted(retries) if len(retries) > 0: if C.RETRY_FILES_SAVE_PATH: basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index d25fb036f58..b198f225e4d 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -607,7 +607,8 @@ class TaskExecutor: try: cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: + err = to_unicode(err) + if u"Bad configuration option" in err or u"Usage:" in err: conn_type = "paramiko" except OSError: conn_type = "paramiko" @@ -645,7 +646,9 @@ class TaskExecutor: try: connection._connect() except AnsibleConnectionFailure: + display.debug('connection failed, fallback to accelerate') res = handler._execute_module(module_name='accelerate', module_args=accelerate_args, task_vars=variables, delete_remote_tmp=False) + display.debug(res) connection._connect() return connection diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 601d27c58b5..a16ac755da5 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -34,6 +34,7 @@ from ansible.template import Templar from ansible.vars.hostvars import HostVars from ansible.plugins.callback import CallbackBase from ansible.utils.unicode import to_unicode +from ansible.compat.six import 
string_types try: from __main__ import display @@ -143,7 +144,7 @@ class TaskQueueManager: if isinstance(self._stdout_callback, CallbackBase): stdout_callback_loaded = True - elif isinstance(self._stdout_callback, basestring): + elif isinstance(self._stdout_callback, string_types): if self._stdout_callback not in callback_loader: raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) else: diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py index 281ca044c5f..f5e80d3be76 100644 --- a/lib/ansible/plugins/callback/logentries.py +++ b/lib/ansible/plugins/callback/logentries.py @@ -60,8 +60,9 @@ import socket import random import time import codecs -import ConfigParser import uuid +from ansible.compat.six.moves import configparser + try: import certifi HAS_CERTIFI = True @@ -212,7 +213,7 @@ class CallbackModule(CallbackBase): 'Disabling the Logentries callback plugin.') config_path = os.path.abspath(os.path.dirname(__file__)) - config = ConfigParser.ConfigParser() + config = configparser.ConfigParser() try: config.readfp(open(os.path.join(config_path, 'logentries.ini'))) if config.has_option('logentries', 'api'): diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index 00ee3bc7071..737a31971e0 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -79,7 +79,7 @@ class Connection(ConnectionBase): p = subprocess.Popen( cmd, - shell=isinstance(cmd, basestring), + shell=isinstance(cmd, (text_type, binary_type)), executable=executable, #cwd=... stdin=subprocess.PIPE, stdout=subprocess.PIPE, From f427955640dab75f8be4f97d3477e5303346c455 Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 21 Mar 2016 01:38:48 -0400 Subject: [PATCH 1109/1113] Make default var lookup for tasks follow the dep chain Related to #14296 (@jjshoe's follow-up example on that issue) --- lib/ansible/playbook/role/__init__.py | 5 ++++- lib/ansible/vars/__init__.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 133dd50acca..b86ad0fd024 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -252,10 +252,13 @@ class Role(Base, Become, Conditional, Taggable): def get_parents(self): return self._parents - def get_default_vars(self): + def get_default_vars(self, dep_chain=[]): default_vars = dict() for dep in self.get_all_dependencies(): default_vars = combine_vars(default_vars, dep.get_default_vars()) + if dep_chain: + for parent in dep_chain: + default_vars = combine_vars(default_vars, parent._default_vars) default_vars = combine_vars(default_vars, self._default_vars) return default_vars diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 2d1a872f294..c95d25c5a32 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -220,7 +220,7 @@ class VariableManager: # sure it sees its defaults above any other roles, as we previously # (v1) made sure each task had a copy of its roles default vars if task and task._role is not None: - all_vars = combine_vars(all_vars, task._role.get_default_vars()) + all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task._block._dep_chain)) if host: # next, if a host is specified, we load any vars from group_vars From f323eb858e543a4f51f15f0b3090925fbef1cefb Mon Sep 17 00:00:00 2001 From: Toshio 
Kuratomi <toshio@fedoraproject.org> Date: Thu, 24 Mar 2016 06:57:52 -0700 Subject: [PATCH 1110/1113] nv_por is needed later in the file Fixes #14654 --- lib/ansible/plugins/action/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index c0845d97f2c..02771e3fcc5 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -204,8 +204,8 @@ class ActionModule(ActionBase): dest_is_local = True # CHECK FOR NON-DEFAULT SSH PORT + inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT if self._task.args.get('dest_port', None) is None: - inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT if inv_port is not None: self._task.args['dest_port'] = inv_port From 2c20579a06925a16abb95798237b97ce4987173d Mon Sep 17 00:00:00 2001 From: James Cammarata <jimi@sngx.net> Date: Mon, 25 Jan 2016 13:04:52 -0500 Subject: [PATCH 1111/1113] Add options to make includes 'static' * Can be configured in the ansible.cfg for tasks/handlers individually * If an included filename contains no vars or loops, it will be expanded in-place as if it were marked as static --- examples/ansible.cfg | 7 + lib/ansible/cli/playbook.py | 37 +++-- lib/ansible/constants.py | 4 + lib/ansible/errors/__init__.py | 6 +- lib/ansible/executor/task_queue_manager.py | 14 +- lib/ansible/playbook/helpers.py | 150 +++++++++++++++++++-- lib/ansible/playbook/included_file.py | 3 + lib/ansible/playbook/role/__init__.py | 8 +- lib/ansible/playbook/task_include.py | 72 ++++++++++ 9 files changed, 268 insertions(+), 33 deletions(-) create mode 100644 lib/ansible/playbook/task_include.py diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 19913af9aa8..0f4315e8c2c 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -54,6 +54,13 @@ # enable additional callbacks #callback_whitelist = timer, mail +# Determine whether includes in tasks and handlers are "static" by +# default. As of 2.0, includes are dynamic by default. Setting these +# values to True will make includes behave more like they did in the +# 1.x versions. 
+#task_includes_static = True +#handler_includes_static = True + # change this for alternative sudo implementations #sudo_exe = sudo diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index dfd06b19208..ff0255e6531 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -30,6 +30,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader +from ansible.playbook.block import Block from ansible.playbook.play_context import PlayContext from ansible.utils.vars import load_extra_vars from ansible.vars import VariableManager @@ -172,26 +173,34 @@ class PlaybookCLI(CLI): if self.options.listtasks: taskmsg = ' tasks:\n' + def _process_block(b): + taskmsg = '' + for task in b.block: + if isinstance(task, Block): + taskmsg += _process_block(task) + else: + if task.action == 'meta': + continue + + all_tags.update(task.tags) + if self.options.listtasks: + cur_tags = list(mytags.union(set(task.tags))) + cur_tags.sort() + if task.name: + taskmsg += " %s" % task.get_name() + else: + taskmsg += " %s" % task.action + taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) + + return taskmsg + all_vars = variable_manager.get_vars(loader=loader, play=play) play_context = PlayContext(play=play, options=self.options) for block in play.compile(): block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue - - for task in block.block: - if task.action == 'meta': - continue - - all_tags.update(task.tags) - if self.options.listtasks: - cur_tags = list(mytags.union(set(task.tags))) - cur_tags.sort() - if task.name: - taskmsg += " %s" % task.get_name() - else: - taskmsg += " %s" % task.action - taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) + taskmsg += _process_block(block) if self.options.listtags: cur_tags = list(mytags.union(all_tags)) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 1a9cbbce739..b45883e117b 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -162,6 +162,10 @@ DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_F DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True) DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True) +# static includes +DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, boolean=True) +DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, boolean=True) + # disclosure DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True) DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index faf7c33416d..78259000aa8 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -44,7 +44,7 @@ class AnsibleError(Exception): which should be returned by the DataLoader() class. 
''' - def __init__(self, message="", obj=None, show_content=True): + def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False): # we import this here to prevent an import loop problem, # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject @@ -53,8 +53,10 @@ class AnsibleError(Exception): self._show_content = show_content if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() - if extended_error: + if extended_error and not suppress_extended_error: self.message = '%s\n\n%s' % (to_str(message), to_str(extended_error)) + else: + self.message = '%s' % to_str(message) else: self.message = '%s' % to_str(message) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index a16ac755da5..62ec8295e77 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -28,6 +28,7 @@ from ansible.errors import AnsibleError from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.result import ResultProcess from ansible.executor.stats import AggregateStats +from ansible.playbook.block import Block from ansible.playbook.play_context import PlayContext from ansible.plugins import callback_loader, strategy_loader, module_loader from ansible.template import Templar @@ -118,11 +119,18 @@ class TaskQueueManager: for key in self._notified_handlers.keys(): del self._notified_handlers[key] - # FIXME: there is a block compile helper for this... + def _process_block(b): + temp_list = [] + for t in b.block: + if isinstance(t, Block): + temp_list.extend(_process_block(t)) + else: + temp_list.append(t) + return temp_list + handler_list = [] for handler_block in handlers: - for handler in handler_block.block: - handler_list.append(handler) + handler_list.extend(_process_block(handler_block)) # then initialize it with the handler names from the handler list for handler in handler_list: diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py index c4f11c1c8ed..2c759b3ad58 100644 --- a/lib/ansible/playbook/helpers.py +++ b/lib/ansible/playbook/helpers.py @@ -20,9 +20,17 @@ __metaclass__ = type import os -from ansible.errors import AnsibleParserError +from ansible import constants as C +from ansible.compat.six import string_types +from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' @@ -72,16 +80,18 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task + from ansible.playbook.task_include import TaskInclude + from ansible.template import Templar assert isinstance(ds, list) task_list = [] - for task in ds: - assert isinstance(task, dict) + for task_ds in ds: + assert isinstance(task_ds, dict) - if 'block' in task: + if 'block' in task_ds: t = Block.load( - task, + task_ds, play=play, parent_block=block, role=role, @@ -90,13 +100,133 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h 
variable_manager=variable_manager, loader=loader, ) + task_list.append(t) else: - if use_handlers: - t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) - else: - t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) + if 'include' in task_ds: + t = TaskInclude.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) - task_list.append(t) + all_vars = variable_manager.get_vars(loader=loader, play=play, task=t) + templar = Templar(loader=loader, variables=all_vars) + + # check to see if this include is static, which can be true if: + # 1. the user set the 'static' option to true + # 2. one of the appropriate config options was set + # 3. the included file name contains no variables, and has no loop + is_static = t.static or \ + C.DEFAULT_TASK_INCLUDES_STATIC or \ + (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \ + not templar._contains_vars(t.args.get('_raw_params')) and t.loop is None + + if is_static: + if t.loop is not None: + raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds) + + # FIXME: all of this code is very similar (if not identical) to that in + # plugins/strategy/__init__.py, and should be unified to avoid + # patches only being applied to one or the other location + if task_include: + # handle relative includes by walking up the list of parent include + # tasks and checking the relative result to see if it exists + parent_include = task_include + cumulative_path = None + while parent_include is not None: + parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params'))) + if cumulative_path is None: + cumulative_path = parent_include_dir + elif not os.path.isabs(cumulative_path): + cumulative_path = os.path.join(parent_include_dir, cumulative_path) + include_target = templar.template(t.args['_raw_params']) + if t._role: + new_basedir = os.path.join(t._role._role_path, 'tasks', cumulative_path) + include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target) + else: + include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target) + + if os.path.exists(include_file): + break + else: + parent_include = parent_include._task_include + else: + try: + include_target = templar.template(t.args['_raw_params']) + except AnsibleUndefinedVariable as e: + raise AnsibleParserError( + "Error when evaluating variable in include name: %s.\n\n" \ + "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \ + "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \ + "sources like group or host vars." 
% t.args['_raw_params'], + obj=task_ds, + suppress_extended_error=True, + ) + if t._role: + if use_handlers: + include_file = loader.path_dwim_relative(t._role._role_path, 'handlers', include_target) + else: + include_file = loader.path_dwim_relative(t._role._role_path, 'tasks', include_target) + else: + include_file = loader.path_dwim(include_target) + + data = loader.load_from_file(include_file) + if data is None: + return [] + elif not isinstance(data, list): + raise AnsibleError("included task files must contain a list of tasks", obj=data) + + included_blocks = load_list_of_blocks( + data, + play=play, + parent_block=block, + task_include=t, + role=role, + use_handlers=use_handlers, + loader=loader, + variable_manager=variable_manager, + ) + + # Remove the raw params field from the module args, so it won't show up + # later when getting the vars for this task/childen + t.args.pop('_raw_params', None) + + # pop tags out of the include args, if they were specified there, and assign + # them to the include. If the include already had tags specified, we raise an + # error so that users know not to specify them both ways + tags = t.vars.pop('tags', []) + if isinstance(tags, string_types): + tags = tags.split(',') + + if len(tags) > 0: + if len(t.tags) > 0: + raise AnsibleParserError( + "Include tasks should not specify tags in more than one way (both via args and directly on the task)." \ + " Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement", + obj=task_ds, + suppress_extended_error=True, + ) + display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option") + else: + tags = t.tags[:] + + # now we extend the tags on each of the included blocks + for b in included_blocks: + b.tags = list(set(b.tags).union(tags)) + # END FIXME + + # FIXME: send callback here somehow... 
+ # FIXME: handlers shouldn't need this special handling, but do + # right now because they don't iterate blocks correctly + if use_handlers: + for b in included_blocks: + task_list.extend(b.block) + else: + task_list.extend(included_blocks) + else: + task_list.append(t) + elif use_handlers: + t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) + task_list.append(t) + else: + t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) + task_list.append(t) return task_list diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index cc756a75a96..1c0001f6b56 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -84,6 +84,9 @@ class IncludedFile: task_vars['item'] = include_variables['item'] = include_result['item'] if original_task: + if original_task.static: + continue + if original_task._task_include: # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index b86ad0fd024..7523bdae9d3 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -176,16 +176,16 @@ class Role(Base, Become, Conditional, Taggable): task_data = self._load_role_yaml('tasks') if task_data: try: - self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader) + self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager) except AssertionError: raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) handler_data = self._load_role_yaml('handlers') if handler_data: try: - self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader) - except: - raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) + self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader, variable_manager=self._variable_manager) + except AssertionError: + raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=handler_data) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') diff --git a/lib/ansible/playbook/task_include.py b/lib/ansible/playbook/task_include.py new file mode 100644 index 00000000000..4b1d2c098b8 --- /dev/null +++ b/lib/ansible/playbook/task_include.py @@ -0,0 +1,72 @@ +# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.playbook.attribute import FieldAttribute +from ansible.playbook.task import Task + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + +__all__ = ['TaskInclude'] + + +class TaskInclude(Task): + + """ + A task include is derived from a regular task to handle the special + circumstances related to the `- include: ...` task. + """ + + # ================================================================================= + # ATTRIBUTES + + _static = FieldAttribute(isa='bool', default=False) + + @staticmethod + def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None): + t = TaskInclude(block=block, role=role, task_include=task_include) + return t.load_data(data, variable_manager=variable_manager, loader=loader) + + def get_vars(self): + ''' + We override the parent Task() classes get_vars here because + we need to include the args of the include into the vars as + they are params to the included tasks. + ''' + all_vars = dict() + if self._block: + all_vars.update(self._block.get_vars()) + if self._task_include: + all_vars.update(self._task_include.get_vars()) + + all_vars.update(self.vars) + all_vars.update(self.args) + + if 'tags' in all_vars: + del all_vars['tags'] + if 'when' in all_vars: + del all_vars['when'] + + return all_vars + From 13b295f1eeb2789d83f158a9901aef8fb62ffcff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 24 Mar 2016 09:21:49 -0700 Subject: [PATCH 1112/1113] Fix using non-ascii for inventory hostname patterns with the CLI. --- lib/ansible/cli/adhoc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 52ab761d523..c69bb42cc8c 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -32,6 +32,7 @@ from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.plugins import get_all_plugin_loaders from ansible.utils.vars import load_extra_vars +from ansible.utils.unicode import to_unicode from ansible.vars import VariableManager try: @@ -95,7 +96,7 @@ class AdHocCLI(CLI): super(AdHocCLI, self).run() # only thing left should be host pattern - pattern = self.args[0] + pattern = to_unicode(self.args[0]) # ignore connection password cause we are local if self.options.connection == "local": From 228ad3ca39ec74cd7113514943691f861b2986b2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi <toshio@fedoraproject.org> Date: Thu, 24 Mar 2016 09:25:37 -0700 Subject: [PATCH 1113/1113] Should be errors=strict since we don't want to end up matching hosts like '???' --- lib/ansible/cli/adhoc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index c69bb42cc8c..d7fc5d7fad3 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -96,7 +96,7 @@ class AdHocCLI(CLI): super(AdHocCLI, self).run() # only thing left should be host pattern - pattern = to_unicode(self.args[0]) + pattern = to_unicode(self.args[0], errors='strict') # ignore connection password cause we are local if self.options.connection == "local":