From 45f41f4b054f4ce20e84fbfc5e5e2743285c35d0 Mon Sep 17 00:00:00 2001 From: louis Date: Tue, 30 Sep 2014 08:16:19 +0200 Subject: [PATCH 001/386] added skeleton support in user module --- system/user.py | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/system/user.py b/system/user.py index 551384a7a67..e74cc33ce07 100644 --- a/system/user.py +++ b/system/user.py @@ -74,6 +74,10 @@ options: required: false description: - Optionally set the user's home directory. + skeleton: + required: false + description: + - Optionally set a home skeleton directory. Requires createhome option! password: required: false description: @@ -245,6 +249,7 @@ class User(object): self.remove = module.params['remove'] self.createhome = module.params['createhome'] self.move_home = module.params['move_home'] + self.skeleton = module.params['skeleton'] self.system = module.params['system'] self.login_class = module.params['login_class'] self.append = module.params['append'] @@ -324,6 +329,10 @@ class User(object): if self.createhome: cmd.append('-m') + + if self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) else: cmd.append('-M') @@ -593,10 +602,14 @@ class User(object): def create_homedir(self, path): if not os.path.exists(path): - # use /etc/skel if possible - if os.path.exists('/etc/skel'): + if self.skeleton is not None: + skeleton = self.skeleton + else: + skeleton = '/etc/skel' + + if os.path.exists(skeleton): try: - shutil.copytree('/etc/skel', path, symlinks=True) + shutil.copytree(skeleton, path, symlinks=True) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) else: @@ -684,6 +697,10 @@ class FreeBsdUser(User): if self.createhome: cmd.append('-m') + if self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) + if self.shell is not None: cmd.append('-s') cmd.append(self.shell) @@ -859,6 +876,10 @@ class OpenBSDUser(User): if self.createhome: cmd.append('-m') + if 
self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) + cmd.append(self.name) return self.execute_command(cmd) @@ -1026,6 +1047,10 @@ class NetBSDUser(User): if self.createhome: cmd.append('-m') + if self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) + cmd.append(self.name) return self.execute_command(cmd) @@ -1178,6 +1203,10 @@ class SunOS(User): if self.createhome: cmd.append('-m') + if self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) + cmd.append(self.name) if self.module.check_mode: @@ -1351,6 +1380,10 @@ class AIX(User): if self.createhome: cmd.append('-m') + if self.skeleton is not None: + cmd.append('-k') + cmd.append(self.skeleton) + cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) @@ -1473,6 +1506,7 @@ def main(): remove=dict(default='no', type='bool'), # following options are specific to useradd createhome=dict(default='yes', type='bool'), + skeleton=dict(default=None, type='str'), system=dict(default='no', type='bool'), # following options are specific to usermod move_home=dict(default='no', type='bool'), From 102167f22ecc8dd2dd4c0fed919f02579de18f17 Mon Sep 17 00:00:00 2001 From: Jim Patterson Date: Sun, 30 Nov 2014 19:31:09 -0500 Subject: [PATCH 002/386] Correct check mode for pip in virtualenv. Fix #412. Check mode was always returning changed=True for pip when the target was in a virtualenv. The code now uses the normal tests for determining if change status. 
--- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..3ba93185a31 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -314,7 +314,7 @@ def main(): this_dir = os.path.join(this_dir, chdir) if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: + if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): From cda40bc33c0da4444bd83ba527b198545ff99085 Mon Sep 17 00:00:00 2001 From: Sebastian Gerhards Date: Tue, 2 Dec 2014 11:33:10 +0100 Subject: [PATCH 003/386] rhn_register: add support for profilename --- packaging/os/rhn_register.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 1e92405c827..4207acc8c28 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -56,6 +56,11 @@ options: - supply an activation key for use with registration required: False default: null + profilename: + description: + - supply an profilename for use with registration + required: False + default: null channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. @@ -73,6 +78,9 @@ EXAMPLES = ''' # Register with activationkey (1-222333444) and enable extended update support. - rhn_register: state=present activationkey=1-222333444 enable_eus=true +# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname. +- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom + # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). 
- rhn_register: > @@ -209,7 +217,7 @@ class Rhn(RegistrationBase): self.update_plugin_conf('rhnplugin', True) self.update_plugin_conf('subscription-manager', False) - def register(self, enable_eus=False, activationkey=None): + def register(self, enable_eus=False, activationkey=None, profilename=None): ''' Register system to RHN. If enable_eus=True, extended update support will be requested. @@ -221,7 +229,8 @@ class Rhn(RegistrationBase): register_cmd += " --use-eus-channel" if activationkey is not None: register_cmd += " --activationkey '%s'" % activationkey - # FIXME - support --profilename + if profilename is not None: + register_cmd += " --profilename '%s'" % profilename # FIXME - support --systemorgid rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) @@ -285,6 +294,7 @@ def main(): password = dict(default=None, required=False), server_url = dict(default=rhn.config.get_option('serverURL'), required=False), activationkey = dict(default=None, required=False), + profilename = dict(default=None, required=False), enable_eus = dict(default=False, type='bool'), channels = dict(default=[], type='list'), ) @@ -295,6 +305,7 @@ def main(): rhn.password = module.params['password'] rhn.configure(module.params['server_url']) activationkey = module.params['activationkey'] + profilename = module.params['profilename'] channels = module.params['channels'] rhn.module = module From 18183caf8616967e2a6ee6f10ca679b364a2f6ea Mon Sep 17 00:00:00 2001 From: Alex King Date: Mon, 8 Dec 2014 00:01:55 +1300 Subject: [PATCH 004/386] Extend hashes that can be specified by crypt_scheme beyond those understood by Apache/Nginx. 
--- web_infrastructure/htpasswd.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..e263f842fa0 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -46,7 +46,10 @@ options: choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] default: "apr_md5_crypt" description: - - Encryption scheme to be used. + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx state: required: false choices: [ present, absent ] @@ -74,6 +77,8 @@ EXAMPLES = """ - htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 # Remove a user from a password file - htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt """ @@ -81,13 +86,15 @@ import os from distutils.version import StrictVersion try: - from passlib.apache import HtpasswdFile + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext import passlib except ImportError: passlib_installed = False else: passlib_installed = True +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] def create_missing_directories(dest): destpath = os.path.dirname(dest) @@ -99,6 +106,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode): """ Ensures user is present Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes) if not os.path.exists(dest): if not create: 
raise ValueError('Destination %s does not exist' % dest) @@ -106,9 +117,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Create %s" % dest, True) create_missing_directories(dest) if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) if getattr(ht, 'set_password', None): ht.set_password(username, password) else: @@ -117,9 +128,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Created %s and added %s" % (dest, username), True) else: if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, default=crypt_scheme) + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) found = None if getattr(ht, 'check_password', None): From 3fca5e587694989cf74808d49341b83b487a782b Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Sun, 14 Dec 2014 22:53:21 +0000 Subject: [PATCH 005/386] Allow globbing in creates= and removes= directives Fixes 1904 --- commands/command.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..bc286d6855d 100644 --- a/commands/command.py +++ b/commands/command.py @@ -20,6 +20,7 @@ import sys import datetime +import glob import traceback import re import shlex @@ -161,7 +162,7 @@ def main(): # and the filename already exists. This allows idempotence # of command executions. 
v = os.path.expanduser(creates) - if os.path.exists(v): + if glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s exists" % v, @@ -175,7 +176,7 @@ def main(): # and the filename does not exist. This allows idempotence # of command executions. v = os.path.expanduser(removes) - if not os.path.exists(v): + if not glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s does not exist" % v, From bb6182575e2f8b51e79d272e80d4e9e9e68fed83 Mon Sep 17 00:00:00 2001 From: Austin Brown Date: Wed, 17 Dec 2014 10:24:31 -0500 Subject: [PATCH 006/386] Updating rubygems --no-document param --- packaging/language/gem.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 3740a3e7ce3..05a8569546f 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -182,8 +182,7 @@ def install(module): cmd.append('--no-user-install') if module.params['pre_release']: cmd.append('--pre') - cmd.append('--no-rdoc') - cmd.append('--no-ri') + cmd.append('--no-document') cmd.append(module.params['gem_source']) module.run_command(cmd, check_rc=True) From 8f3b5c640b98ba9473a0df7ddc650539a3efc048 Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Sun, 21 Dec 2014 16:10:39 +0100 Subject: [PATCH 007/386] Fix bind-volumes on docker >= 1.4.0 If bind-volumes are submitted to docker >= 1.4.0 with the volumes set in addition to the binds, docker will create a regular volume and not bind-mount the specified path. --- cloud/docker/docker.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f53819f2679..bbcb73df99b 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -484,20 +484,22 @@ class DockerManager(object): vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") - # host mount (e.g. 
/mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. /www, mounts a docker volume /www on the container at the same location) - else: + # regular volume + if len(parts) == 1: self.volumes[parts[0]] = {} + # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + else: + self.module.fail_json(msg='volumes support 1 to 3 arguments') self.lxc_conf = None if self.module.params.get('lxc_conf'): From 00b4f4d543512c0da9b80917988c9d1abab5515a Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Thu, 25 Dec 2014 17:36:51 -0500 Subject: [PATCH 008/386] Fix to revoke privileges for mysql user = '' Issue #9848 --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index e160fcb68f6..f4b620c0e9a 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -254,7 +254,7 @@ def privileges_get(cursor, user,host): return x for grant in grants: - res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) + res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? 
?(.*)", grant[0]) if res is None: raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) privileges = res.group(1).split(", ") From d9360a7613318a593d4ed5688269979dc60c7d72 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 26 Dec 2014 01:29:54 -0500 Subject: [PATCH 009/386] Update docs, add example of using django_manage to run other commands. --- web_infrastructure/django_manage.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 3e34a6388c0..d71001fd8c2 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -30,7 +30,8 @@ options: command: choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] description: - - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django. + - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. + - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag. required: true app_path: description: @@ -102,7 +103,7 @@ EXAMPLES = """ # Load the initial_data fixture into the application - django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} -#Run syncdb on the application +# Run syncdb on the application - django_manage: > command=syncdb app_path={{ django_dir }} @@ -110,8 +111,11 @@ EXAMPLES = """ pythonpath={{ settings_dir }} virtualenv={{ virtualenv_dir }} -#Run the SmokeTest test case from the main app. Useful for testing deploys. 
-- django_manage: command=test app_path=django_dir apps=main.SmokeTest +# Run the SmokeTest test case from the main app. Useful for testing deploys. +- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest + +# Create an initial superuser. +- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }} """ From aa99eade7e45b6995c5cbc364cb45ff9cdbe2598 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sat, 27 Dec 2014 20:08:25 -0500 Subject: [PATCH 010/386] ansible-modules-core #530 fix - Mount module does not accept spaces in mount point path --- system/mount.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/system/mount.py b/system/mount.py index 9dc6fbe7b8c..0d78d6791f1 100644 --- a/system/mount.py +++ b/system/mount.py @@ -114,6 +114,11 @@ def set_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' to_write = [] @@ -158,7 +163,8 @@ def set_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # mount function needs origname + return (origname, changed) def unset_mount(**kwargs): @@ -173,6 +179,11 @@ def unset_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + to_write = [] changed = False for line in open(args['fstab'], 'r').readlines(): @@ -201,7 +212,8 @@ def unset_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # umount needs origname + return (origname, changed) def 
mount(module, **kwargs): From 659c6d3a7eefcf301f33270c2c1d4f886009b6a6 Mon Sep 17 00:00:00 2001 From: Andrey Trubachev Date: Sat, 3 Jan 2015 10:32:27 +0300 Subject: [PATCH 011/386] Fix 'wait_for' doesn't work with ipv6only host --- utilities/logic/wait_for.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index ae316fe1a17..41852148971 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -351,10 +351,8 @@ def main(): except IOError: break elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) try: - s.connect( (host, port) ) + s = socket.create_connection( (host, port), connect_timeout) s.shutdown(socket.SHUT_RDWR) s.close() time.sleep(1) @@ -397,10 +395,8 @@ def main(): elapsed = datetime.datetime.now() - start module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) try: - s.connect( (host, port) ) + s = socket.create_connection( (host, port), connect_timeout) if search_regex: data = '' matched = False From 10e48d18f7c81106bfd5fa663492e06fd9ef7c9b Mon Sep 17 00:00:00 2001 From: Andrey Trubachev Date: Mon, 5 Jan 2015 21:05:01 +0300 Subject: [PATCH 012/386] Fix python-2.4 compatibility --- utilities/logic/wait_for.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 41852148971..3e40d4b0027 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -293,6 +293,25 @@ def _little_endian_convert_32bit(block): # which lets us start at the end of the string block and work to the begining return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ]) +def _create_connection( (host, port), connect_timeout): + """ + Connect to a 2-tuple (host, port) and return + the 
socket object. + + Args: + 2-tuple (host, port) and connection timeout + Returns: + Socket object + """ + if sys.version_info < (2, 6): + (family, _) = _convert_host_to_ip(host) + connect_socket = socket.socket(family, socket.SOCK_STREAM) + connect_socket.settimeout(connect_timeout) + connect_socket.connect( (host, port) ) + else: + connect_socket = socket.create_connection( (host, port), connect_timeout) + return connect_socket + def main(): module = AnsibleModule( @@ -352,7 +371,7 @@ def main(): break elif port: try: - s = socket.create_connection( (host, port), connect_timeout) + s = _create_connection( (host, port), connect_timeout) s.shutdown(socket.SHUT_RDWR) s.close() time.sleep(1) @@ -396,7 +415,7 @@ def main(): module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) elif port: try: - s = socket.create_connection( (host, port), connect_timeout) + s = _create_connection( (host, port), connect_timeout) if search_regex: data = '' matched = False From 9e381264ae599788f77a629ce3ffc7d24cf7c20a Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Wed, 7 Jan 2015 04:47:58 +0000 Subject: [PATCH 013/386] Document globbing support --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index bc286d6855d..f9d2b18c921 100644 --- a/commands/command.py +++ b/commands/command.py @@ -47,12 +47,12 @@ options: aliases: [] creates: description: - - a filename, when it already exists, this step will B(not) be run. + - a filename or glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename, when it does not exist, this step will B(not) be run. + - a filename or glob pattern, when it does not exist, this step will B(not) be run. version_added: "0.8" required: no default: null From a8b8128aac9f51241c9a3da74ee28aa59c9ead13 Mon Sep 17 00:00:00 2001 From: "Federico G. 
Schwindt" Date: Thu, 8 Jan 2015 02:06:47 +0000 Subject: [PATCH 014/386] Remove skipped=True when using creates and removes Based on #8645 --- network/basics/uri.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index aac724a8f13..9be0a06cdce 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -381,7 +381,7 @@ def main(): # of uri executions. creates = os.path.expanduser(creates) if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if the line contains removes=filename @@ -389,7 +389,7 @@ def main(): # of uri executions. v = os.path.expanduser(removes) if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # httplib2 only sends authentication after the server asks for it with a 401. 
From a935baf7dd24f1f4dd95ca39b8cbbd1c3f17ac66 Mon Sep 17 00:00:00 2001 From: Annika Backstrom Date: Thu, 22 Jan 2015 10:51:09 -0500 Subject: [PATCH 015/386] Force redownload if sha256sum does not match --- network/basics/get_url.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index b0d27859420..1fdb90a9da9 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -217,8 +217,29 @@ def main(): dest_is_dir = os.path.isdir(dest) last_mod_time = None + # Remove any non-alphanumeric characters, including the infamous + # Unicode zero-width space + stripped_sha256sum = re.sub(r'\W+', '', sha256sum) + + # Fail early if sha256 is not supported + if sha256sum != '' and not HAS_HASHLIB: + module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") + if not dest_is_dir and os.path.exists(dest): - if not force: + checksum_mismatch = False + + # If the download is not forced and there is a checksum, allow + # checksum match to skip the download. 
+ if not force and sha256sum != '': + destination_checksum = module.sha256(dest) + + if stripped_sha256sum.lower() == destination_checksum: + module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) + + checksum_mismatch = True + + # Not forcing redownload, unless sha256sum has already failed + if not force and not checksum_mismatch: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) # If the file already exists, prepare the last modified time for the @@ -281,15 +302,7 @@ def main(): # Check the digest of the destination file and ensure that it matches the # sha256sum parameter if it is present if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) + destination_checksum = module.sha256(dest) if stripped_sha256sum.lower() != destination_checksum: os.remove(dest) From fd061d437c8568f02f5997fd3e7d2b49202b1c5d Mon Sep 17 00:00:00 2001 From: Wouter Bolsterlee Date: Mon, 12 Jan 2015 13:38:42 +0100 Subject: [PATCH 016/386] Fix typo in `version_added` field. --- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 97576a5258b..b8c01ba5247 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -63,7 +63,7 @@ options: default: "no" choices: [ "yes", "no" ] virtualenv_command: - version_aded: "1.1" + version_added: "1.1" description: - The command or a pathname to the command to create the virtual environment with. 
For example C(pyvenv), C(virtualenv), From 19b49c1d9614fc2680f7d8e752f9eafdeac3a70c Mon Sep 17 00:00:00 2001 From: Wouter Bolsterlee Date: Wed, 28 Jan 2015 12:45:33 +0100 Subject: [PATCH 017/386] A few coding style cleanups --- packaging/language/pip.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index b8c01ba5247..e56c7ef7abd 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -282,14 +282,14 @@ def main(): cmd = '%s %s' % (pip, state_map[state]) # If there's a virtualenv we want things we install to be able to use other - # installations that exist as binaries within this virtualenv. Example: we - # install cython and then gevent -- gevent needs to use the cython binary, - # not just a python package that will be found by calling the right python. + # installations that exist as binaries within this virtualenv. Example: we + # install cython and then gevent -- gevent needs to use the cython binary, + # not just a python package that will be found by calling the right python. # So if there's a virtualenv, we add that bin/ to the beginning of the PATH # in run_command by setting path_prefix here. path_prefix = None if env: - path_prefix="/".join(pip.split('/')[:-1]) + path_prefix = "/".join(pip.split('/')[:-1]) # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ @@ -350,7 +350,8 @@ def main(): changed = 'Successfully installed' in out_pip module.exit_json(changed=changed, cmd=cmd, name=name, version=version, - state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) + state=state, requirements=requirements, virtualenv=env, + stdout=out, stderr=err) # import module snippets from ansible.module_utils.basic import * From 8ce0284ace74cb12af4b264d95dfe4f7af7c1a70 Mon Sep 17 00:00:00 2001 From: Wouter Bolsterlee Date: Wed, 28 Jan 2015 12:45:25 +0100 Subject: [PATCH 018/386] Add a virtualenv_python argument to the pip module This allows specifying the Python version to use when creating the virtualenv. See issue #586. --- packaging/language/pip.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index e56c7ef7abd..242a815a93f 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -70,6 +70,14 @@ options: C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). required: false default: virtualenv + virtualenv_python: + version_added: "FIXME" + description: + - The Python executable used for creating the virtual environment. + For example C(python3.4), C(python2.7). When not specified, the + system Python version is used. 
+ required: false + default: null state: description: - The state of module @@ -224,6 +232,7 @@ def main(): virtualenv=dict(default=None, required=False), virtualenv_site_packages=dict(default='no', type='bool'), virtualenv_command=dict(default='virtualenv', required=False), + virtualenv_python=dict(default=None, required=False, type='str'), use_mirrors=dict(default='yes', type='bool'), extra_args=dict(default=None, required=False), chdir=dict(default=None, required=False), @@ -239,6 +248,7 @@ def main(): version = module.params['version'] requirements = module.params['requirements'] extra_args = module.params['extra_args'] + virtualenv_python = module.params['virtualenv_python'] chdir = module.params['chdir'] if state == 'latest' and version is not None: @@ -256,18 +266,21 @@ def main(): if module.check_mode: module.exit_json(changed=True) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) + cmd = os.path.expanduser(virtualenv_command) + if os.path.basename(cmd) == cmd: + cmd = module.get_bin_path(virtualenv_command, True) if module.params['virtualenv_site_packages']: - cmd = '%s --system-site-packages %s' % (virtualenv, env) + cmd += ' --system-site-packages' else: - cmd_opts = _get_cmd_options(module, virtualenv) + cmd_opts = _get_cmd_options(module, cmd) if '--no-site-packages' in cmd_opts: - cmd = '%s --no-site-packages %s' % (virtualenv, env) - else: - cmd = '%s %s' % (virtualenv, env) + cmd += ' --no-site-packages' + + if virtualenv_python: + cmd += ' -p%s' % virtualenv_python + + cmd = "%s %s" % (cmd, env) this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) From d7efb2635c003bdb6dcb8e1321f8c94434f720cf Mon Sep 17 00:00:00 2001 From: Jan Inowolski Date: Thu, 29 Jan 2015 12:38:58 +0100 Subject: [PATCH 019/386] update git remote url before ls-remote related to #8177 --- source_control/git.py | 13 ++++++++++++- 1 file 
changed, 12 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 0cb87304a92..fb7af79f2c4 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -473,10 +473,20 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, refspec): +def set_remote_url(git_path, module, repo, dest, remote): ''' updates repo from remote sources ''' commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): + ''' updates repo from remote sources ''' + set_remote_url(git_path, module, repo, dest, remote) + commands = [] + fetch_str = 'download remote objects and refs' if bare: @@ -709,6 +719,7 @@ def main(): if not module.check_mode: reset(git_path, module, dest) # exit if already at desired sha version + set_remote_url(git_path, module, repo, dest, remote) remote_head = get_remote_head(git_path, module, dest, version, remote, bare) if before == remote_head: if local_mods: From 6d6e948f1e869811f99eb9b3402234ccf3f24716 Mon Sep 17 00:00:00 2001 From: Alexis Seigneurin Date: Mon, 2 Feb 2015 14:51:04 +0100 Subject: [PATCH 020/386] - 'name' should not be required so as to allow uninstalling a cron_file --- system/cron.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/cron.py b/system/cron.py index c0a39b61c61..ed62674c01c 100644 --- a/system/cron.py +++ b/system/cron.py @@ -46,7 +46,7 @@ options: description: - Description of a crontab entry. default: null - required: true + required: false user: description: - The specific user whose crontab should be modified. 
@@ -397,7 +397,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name=dict(required=True), + name=dict(required=False), user=dict(required=False), job=dict(required=False), cron_file=dict(required=False), From 80da041eb61c8397b21f0e06d26c7b2c58745417 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Fri, 20 Feb 2015 17:22:03 +0000 Subject: [PATCH 021/386] - List the name servers of a zone. --- cloud/amazon/route53.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 7fbe8552f41..9454a7ba81b 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -248,7 +248,13 @@ def main(): module.exit_json(changed=False) if command_in == 'get': - module.exit_json(changed=False, set=record) + if type_in == 'NS': + ns = record['values'] + else: + # Retrieve name servers associated to the zone. + ns = conn.get_zone(zone_in).get_nameservers() + + module.exit_json(changed=False, set=record, nameservers=ns) if command_in == 'delete' and not found_record: module.exit_json(changed=False) From d9f8fa56d8af9de402ee9f48ea832709b20754a8 Mon Sep 17 00:00:00 2001 From: gimoh Date: Mon, 23 Feb 2015 14:14:00 +0000 Subject: [PATCH 022/386] Do not insert extra newline if line already contains it When using YAML multi-line strings, e.g.: - lineinfile: dest: /tmp/foo line: > foo bar the line already ends with a newline. If an extra newline is appended unconditionally it will lead to inserting an extra newline on each run. --- files/lineinfile.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index b9fc628e10c..480811dbbfa 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -242,8 +242,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, # Don't do backref expansion if not asked. 
new_line = line - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep + if not new_line.endswith(os.linesep): + new_line += os.linesep + + if lines[index[0]] != new_line: + lines[index[0]] = new_line msg = 'line replaced' changed = True elif backrefs: From c3f92cca210db1f7042bfce1ff90645255f0b49e Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Thu, 12 Mar 2015 12:55:14 +0100 Subject: [PATCH 023/386] docker: Use a list instead of a dict for volumes according to the docker-py docs. Do not add host-binds to volumes list. --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 6e571a7ba5d..fcc14b5a5e0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -474,13 +474,13 @@ class DockerManager(object): self.volumes = None if self.module.params.get('volumes'): self.binds = {} - self.volumes = {} + self.volumes = [] vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") # regular volume if len(parts) == 1: - self.volumes[parts[0]] = {} + self.volumes.append(parts[0]) # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) elif 2 <= len(parts) <= 3: # default to read-write From 83c6cd04f48c6388a075af5d9a709667b8f007b9 Mon Sep 17 00:00:00 2001 From: Tagir Bakirov Date: Fri, 13 Mar 2015 11:07:13 +0100 Subject: [PATCH 024/386] added 'absent' option to supervisorctl --- web_infrastructure/supervisorctl.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..c3b52d0a79d 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -64,7 +64,7 @@ options: - The desired state of program/group. 
required: true default: null - choices: [ "present", "started", "stopped", "restarted" ] + choices: [ "present", "started", "stopped", "restarted", "absent" ] supervisorctl_path: description: - path to supervisorctl executable @@ -101,7 +101,7 @@ def main(): username=dict(required=False), password=dict(required=False), supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent']) ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) @@ -196,6 +196,19 @@ def main(): processes = get_matched_processes() + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + if state == 'present': if len(processes) > 0: module.exit_json(changed=False, name=name, state=state) From bdc28a6bb351688f54ffab8ca6ca7d572e4f8f67 Mon Sep 17 00:00:00 2001 From: Ian Clegg Date: Fri, 20 Mar 2015 10:34:36 +0000 Subject: [PATCH 025/386] Added support for comma seperated feature names in the name parameter of the win_feature module --- windows/win_feature.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index a54007b47bf..458d942e328 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -28,7 +28,7 @@ $result = New-Object PSObject -Property @{ } If ($params.name) { - $name = $params.name + $name = $params.name -split ',' | % { $_.Trim() } } Else { Fail-Json $result "mising required argument: name" From 1f358721ffbaa302e7f352abc3d7291a3f238798 Mon Sep 17 00:00:00 2001 From: 
Isaac Simmons Date: Mon, 23 Mar 2015 11:46:59 -0400 Subject: [PATCH 026/386] Handle ini file properties that contain interpolation errors in the initial values --- files/ini_file.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/ini_file.py b/files/ini_file.py index e247c265fc8..ed3bb545702 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -120,6 +120,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if cp.get(section, option): cp.remove_option(section, option) changed = True + except ConfigParser.InterpolationError: + cp.remove_option(section, option) + changed = True except: pass @@ -143,6 +146,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese except ConfigParser.NoOptionError: cp.set(section, option, value) changed = True + except ConfigParser.InterpolationError: + cp.set(section, option, value) + changed = True if changed and not module.check_mode: if backup: From fc4c6594003094cd2b8f6ee92847d03c69205fc5 Mon Sep 17 00:00:00 2001 From: Jesse Sandberg Date: Thu, 26 Mar 2015 16:12:18 +0200 Subject: [PATCH 027/386] Validate variable, return only the found variable value instead of tuple Docs imply the mysql_variables is used to operate a single variable therefore - fail before making any db connections if variable is not set - validate chars for mysql variable name with re.match(^[a-z0-9_]+) - use "SHOW VARIABLE WHERE Variable_name" instead of LIKE search - getvariable() returns only the value or None if variable is not found - the module returns only the found variable value instead of tuple for easier operation eg. 
as registered variable in tasks --- database/mysql/mysql_variables.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 199c5eb6eca..7c9ec4459a9 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -67,6 +67,7 @@ EXAMPLES = ''' import ConfigParser import os import warnings +from re import match try: import MySQLdb @@ -103,10 +104,12 @@ def typedvalue(value): def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,)) + cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,)) mysqlvar_val = cursor.fetchall() - return mysqlvar_val - + if len(mysqlvar_val) is 1: + return mysqlvar_val[0][1] + else: + return None def setvariable(cursor, mysqlvar, value): """ Set a global mysql variable to a given value @@ -116,11 +119,9 @@ def setvariable(cursor, mysqlvar, value): should be passed as numeric literals. 
""" - query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ] - query.append(" = %s") - query = ' '.join(query) + query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars') try: - cursor.execute(query, (value,)) + cursor.execute(query + "%s", (value,)) cursor.fetchall() result = True except Exception, e: @@ -204,6 +205,10 @@ def main(): host = module.params["login_host"] mysqlvar = module.params["variable"] value = module.params["value"] + if mysqlvar is None: + module.fail_json(msg="Cannot run without variable to operate with") + if match('^[0-9a-z_]+$', mysqlvar) is None: + module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: @@ -232,17 +237,15 @@ def main(): cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - if mysqlvar is None: - module.fail_json(msg="Cannot run without variable to operate with") mysqlvar_val = getvariable(cursor, mysqlvar) + if mysqlvar_val is None: + module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) if value is None: module.exit_json(msg=mysqlvar_val) else: - if len(mysqlvar_val) < 1: - module.fail_json(msg="Variable not available", changed=False) # Type values before using them value_wanted = typedvalue(value) - value_actual = typedvalue(mysqlvar_val[0][1]) + value_actual = typedvalue(mysqlvar_val) if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) try: From 7f3df5634a40ea007b76e6db7bcf8de8dd4df646 Mon Sep 17 00:00:00 2001 From: nemunaire Date: Fri, 27 Mar 2015 18:07:35 +0100 Subject: [PATCH 028/386] Add optional signal parameter when killing docker container --- cloud/docker/docker.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py 
b/cloud/docker/docker.py index 85eb0525a69..1f30938964a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -176,6 +176,12 @@ options: description: - Enable detached mode to leave the container running in background. default: true + signal: + description: + - With the state "killed", you can alter the signal sent to the + container. + required: false + default: KILL state: description: - Assert the container's desired state. "present" only asserts that the @@ -1272,7 +1278,7 @@ class DockerManager(object): def kill_containers(self, containers): for i in containers: - self.client.kill(i['Id']) + self.client.kill(i['Id'], self.module.params.get('signal')) self.increment_counter('killed') def restart_containers(self, containers): @@ -1436,6 +1442,7 @@ def main(): dns = dict(), detach = dict(default=True, type='bool'), state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), + signal = dict(default=None), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), From b28459e6f5f5053d7ed0282aa061d994a95feb40 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 30 Mar 2015 15:52:17 -0400 Subject: [PATCH 029/386] Wait for process exit on detached=no. --- cloud/docker/docker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 7eea7888059..2f5cb8690d9 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -174,7 +174,8 @@ options: default: null detach: description: - - Enable detached mode to leave the container running in background. + - Enable detached mode to leave the container running in background. If + disabled, fail unless the process exits cleanly. 
default: true state: description: @@ -1258,6 +1259,13 @@ class DockerManager(object): self.client.start(i['Id'], **params) self.increment_counter('started') + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + def stop_containers(self, containers): for i in containers: self.client.stop(i['Id']) From f38186ce8b49ea98e29241712da45917a3154e73 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Fri, 3 Apr 2015 12:41:10 -0700 Subject: [PATCH 030/386] ansible_facts reflects 1.8 output --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 49e11f3caa0..6cb9802410e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1504,7 +1504,7 @@ def main(): summary=manager.counters, containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), - ansible_facts=_ansible_facts(containers.changed)) + ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed))) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) From e9a9d28b6dd9d6e90a96b285d5e17c32e3d6c8b3 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 031/386] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index f89fca448b7..75d3a66ae6b 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -35,6 +35,12 @@ options: required: true default: null aliases: [] + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone to modify + required: false + default: null + aliases: 
[] record: description: - The full DNS record to create or delete @@ -156,6 +162,16 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hosted_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" ''' @@ -191,6 +207,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), + hosted_zone_id = dict(required=False), record = dict(required=True), ttl = dict(required=False, default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -209,6 +226,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() + hosted_zone_id_in = module.params.get('hosted_zone_id') ttl_in = module.params.get('ttl') record_in = module.params.get('record').lower() type_in = module.params.get('type') @@ -257,9 +275,17 @@ def main(): # the private_zone_in boolean specified in the params if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # only save when unique hosted_zone_id is given and is equal to the + # hosted_zone_id_in specified in the params + if hosted_zone_id_in and zone_id == hosted_zone_id_in: + zones[r53zone['Name']] = zone_id + elif not hosted_zone_id_in: + zones[r53zone['Name']] = zone_id # Verify that the requested zone is already defined in Route53 + if not zone_in in zones and hosted_zone_id_in: + errmsg = "Hosted_zone_id %s does not exist in Route53" % hosted_zone_id_in + module.fail_json(msg = errmsg) if not zone_in in zones: errmsg = "Zone %s does not exist in Route53" % zone_in module.fail_json(msg = errmsg) @@ -282,6 +308,8 @@ 
def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name From 85cff6699e0149bc129e4b384dd9af1efa00a3c1 Mon Sep 17 00:00:00 2001 From: Chris AtLee Date: Thu, 16 Apr 2015 17:06:19 -0400 Subject: [PATCH 032/386] Add support for 'update' parameter to hg module --- source_control/hg.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/source_control/hg.py b/source_control/hg.py index d83215fabe1..29086fb9aa5 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -65,6 +65,13 @@ options: required: false default: "no" choices: [ "yes", "no" ] + update: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.0" + description: + - If C(no), do not retrieve new revisions from the origin repository executable: required: false default: null @@ -210,6 +217,7 @@ def main(): revision = dict(default=None, aliases=['version']), force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), + update = dict(default='yes', type='bool'), executable = dict(default=None), ), ) @@ -218,6 +226,7 @@ def main(): revision = module.params['revision'] force = module.params['force'] purge = module.params['purge'] + update = module.params['update'] hg_path = module.params['executable'] or module.get_bin_path('hg', True) hgrc = os.path.join(dest, '.hg/hgrc') @@ -234,6 +243,9 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() elif hg.at_revision: # no update needed, don't pull before = hg.get_revision() From 3c605d4aba68e0170d6779117e1d326790291059 Mon Sep 17 00:00:00 2001 From: Petros Moisiadis Date: Fri, 17 Apr 2015 19:43:25 +0300 Subject: [PATCH 033/386] 
make migrate command idempotent with django built-in migrations Django since version 1.7 has built-in migrations, so no need to have south installed with recent django versions. The 'migrate' command works with built-in migrations without any change, but the output is different than the output produced by south, which breaks idempotence. This commit fixes this. --- web_infrastructure/django_manage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..13207f955c2 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -89,7 +89,7 @@ notes: - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately. 
- - To be able to use the migrate command, you must have south installed and added as an app in your settings + - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings - To be able to use the collectstatic command, you must have enabled staticfiles in your settings requirements: [ "virtualenv", "django" ] author: Scott Anderson @@ -159,7 +159,7 @@ def syncdb_filter_output(line): return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line) def migrate_filter_output(line): - return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) + return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line) def main(): command_allowed_param_map = dict( From 3b954edab2bf54c710b86d95482548b893d648fa Mon Sep 17 00:00:00 2001 From: Lucas David Traverso Date: Sun, 19 Apr 2015 04:39:59 -0300 Subject: [PATCH 034/386] django_manage: Use shebang in manage.py instead of hardcode python --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..c721456715f 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -234,7 +234,7 @@ def main(): _ensure_virtualenv(module) - cmd = "python manage.py %s" % (command, ) + cmd = "./manage.py %s" % (command, ) if command in noinput_commands: cmd = '%s --noinput' % cmd From 1f37f2a1523e2a69685220906f80bad3f8cbd760 Mon Sep 17 00:00:00 2001 From: "Hennadiy (Gena) Verkh" Date: Tue, 21 Apr 2015 11:43:09 +0200 Subject: [PATCH 035/386] Removed method restriction in uri module --- network/basics/uri.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 24f0dbf9e1f..7be1cc92159 100644 --- 
a/network/basics/uri.py +++ b/network/basics/uri.py @@ -73,7 +73,6 @@ options: description: - The HTTP method of the request or response. required: false - choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ] default: "GET" return_content: description: @@ -341,7 +340,7 @@ def main(): password = dict(required=False, default=None), body = dict(required=False, default=None), body_format = dict(required=False, default='raw', choices=['raw', 'json']), - method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), + method = dict(required=False, default='GET'), return_content = dict(required=False, default='no', type='bool'), force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), From 501a665060ab292f6681a829bc4da2cc27a41a5b Mon Sep 17 00:00:00 2001 From: Ian Babrou Date: Sun, 22 Mar 2015 14:16:02 +0300 Subject: [PATCH 036/386] Pulling missing docker image before doing anything --- cloud/docker/docker.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 158b8c8135d..48ab6449f53 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1460,10 +1460,14 @@ def main(): if count > 1 and name: module.fail_json(msg="Count and name must not be used together") - # Explicitly pull new container images, if requested. - # Do this before noticing running and deployed containers so that the image names will differ - # if a newer image has been pulled. + # Explicitly pull new container images, if requested. Do this before + # noticing running and deployed containers so that the image names + # will differ if a newer image has been pulled. + # Missing images should be pulled first to avoid downtime when old + # container is stopped, but image for new one is not downloaded yet. 
+ It also prevents removal of running container before realizing + # that requested image cannot be retrieved. + if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None): manager.pull_image() containers = ContainerSet(manager) From fff29f049e1f7b2103f4527ae440c92950ade6b0 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 8 May 2015 01:40:10 +0900 Subject: [PATCH 037/386] Do not use "is" to compare strings As "is" tests whether operands are the same object rather than whether they have the same value, it potentially causes a wrong result. --- system/service.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/service.py b/system/service.py index 3589340f152..5627f128c92 100644 --- a/system/service.py +++ b/system/service.py @@ -862,7 +862,7 @@ class LinuxService(Service): if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: self.execute_command("%s zap" % svc_cmd, daemonize=True) - if self.action is not "restart": + if self.action != "restart": if svc_cmd != '': # upstart or systemd or OpenRC rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) @@ -970,11 +970,11 @@ class FreeBsdService(Service): def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" - if self.action is "reload": + if self.action == "reload": self.action = "onereload" return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) @@ -1180,9 +1180,9 @@ class NetBsdService(Service): self.running = True def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" self.svc_cmd = "%s" % self.svc_initscript From 
c3cb39dfa02d587315217a24fbf909d8b2ddca71 Mon Sep 17 00:00:00 2001 From: 0tmc Date: Fri, 8 May 2015 12:22:37 +0300 Subject: [PATCH 038/386] Use of proper fstab file on FreeBSD --- system/mount.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/system/mount.py b/system/mount.py index e11d497220b..eed72d2d7c9 100644 --- a/system/mount.py +++ b/system/mount.py @@ -206,13 +206,29 @@ def unset_mount(**kwargs): def mount(module, **kwargs): """ mount up a path or remount if needed """ + + # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab + args = dict( + opts = 'default', + dump = '0', + passno = '0', + fstab = '/etc/fstab' + ) + args.update(kwargs) + mount_bin = module.get_bin_path('mount') name = kwargs['name'] + + cmd = [ mount_bin, ] + if os.path.ismount(name): - cmd = [ mount_bin , '-o', 'remount', name ] - else: - cmd = [ mount_bin, name ] + cmd += [ '-o', 'remount', ] + + if get_platform().lower() == 'freebsd': + cmd += [ '-F', args['fstab'], ] + + cmd += [ name, ] rc, out, err = module.run_command(cmd) if rc == 0: From f620a0ac41c82edc1e894734f4e0c4dbf63dfd3f Mon Sep 17 00:00:00 2001 From: HelenaTian Date: Tue, 12 May 2015 20:53:36 -0700 Subject: [PATCH 039/386] Update gce.py to correctly handle propagated metadata type from a mother template My project is using Ansible to automate cloud build process. Ansible has a core module gce.py for managing GCE instances. We've come across a use case that's not yet supported - when executing ansible-playbook, if a child template is included, then metadata which is defined in and propagated from the mother template is treated as string type and not parsed correctly(which instead is dictionary type), and triggers release flow failure. 
We currently put some fix by explicitly casting metadata to string type in our own branch, but would like to contribute the fix to Ansible so that everyone onboarding GCE and using Ansible for release management could benefit from it, or hear owner's opinion on fixing the issue if there's a better fix in owner's mind:) --- cloud/google/gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..be9e6818c76 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -317,7 +317,7 @@ def create_instances(module, gce, instance_names): # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] if metadata: try: - md = literal_eval(metadata) + md = literal_eval(str(metadata)) if not isinstance(md, dict): raise ValueError('metadata must be a dict') except ValueError, e: From 3d1b6285c3b429f59aad4cfc5b5a5eb629779d4b Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:21:56 +0100 Subject: [PATCH 040/386] Update vsphere_guest.py Clear up confusing action where disk and hardware state can be passed to a reconfigure but is ignored (module only changes CPU and RAM when state is 'reconfigured' and ignore everything else. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a33fd52ea70..175ed35724b 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -65,7 +65,7 @@ options: default: null state: description: - - Indicate desired state of the vm. + - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest. 
default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: From 6a04f0d45288a9bad860ec28ca5684fe6edee347 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:40:35 +0100 Subject: [PATCH 041/386] Update vsphere_guest.py Clarifies that when using a template, CPU, RAM, NIC and Disk params are silently ignored. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 175ed35724b..20b41942ed6 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be ran with state) + - Specifies if the VM should be deployed from a template (annot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. default: no choices: ['yes', 'no'] template_src: From 14d7073b517aff1121b907213016490971d49754 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:43:20 +0100 Subject: [PATCH 042/386] Update vsphere_guest.py Remove etc. As reconfigure only reconfigures RAM and CPU. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 20b41942ed6..7faee2303d5 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -195,7 +195,7 @@ EXAMPLES = ''' hostname: esx001.mydomain.local # Deploy a guest from a template -# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. +# No reconfiguration of the destination guest is done at this stage, a reconfigure is needed to adjust RAM/CPU. 
- vsphere_guest: vcenter_hostname: vcenter.mydomain.local username: myuser From 0ec4c43931c200dc289798a5efea494c8b5dba37 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 19:05:25 +0100 Subject: [PATCH 043/386] Update vsphere_guest.py Spelling mistake. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 7faee2303d5..93fb8961f56 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (annot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. + - Specifies if the VM should be deployed from a template (cannot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. default: no choices: ['yes', 'no'] template_src: From 7325d6a8ef43e5dee9bbffab71ebfa4323895ab8 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 19:07:28 +0100 Subject: [PATCH 044/386] Update vsphere_guest.py Remove redundant description. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 93fb8961f56..8e1aa686701 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. 
+ - Specifies if the VM should be deployed from a template (cannot be run with state). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: From cc221aa1a2d1fac9d909d28feda246e06353c3d9 Mon Sep 17 00:00:00 2001 From: Alan Scherger Date: Wed, 27 May 2015 23:12:34 -0500 Subject: [PATCH 045/386] fix docs; only delete network if fwname is not provided --- cloud/google/gce_net.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 079891c5e10..fb9a186f66c 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -75,7 +75,7 @@ options: aliases: [] state: description: - - desired state of the persistent disk + - desired state of the network or firewall required: false default: "present" choices: ["active", "present", "absent", "deleted"] @@ -264,7 +264,7 @@ def main(): if fw: gce.ex_destroy_firewall(fw) changed = True - if name: + elif name: json_output['name'] = name network = None try: From c64a3eb03d0f9adb513c7ad4e8e0b222c0052e7e Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Thu, 28 May 2015 19:08:52 +0100 Subject: [PATCH 046/386] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8e1aa686701..8d1b7946688 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -195,7 +195,6 @@ EXAMPLES = ''' hostname: esx001.mydomain.local # Deploy a guest from a template -# No reconfiguration of the destination guest is done at this stage, a reconfigure is needed to adjust RAM/CPU. 
- vsphere_guest: vcenter_hostname: vcenter.mydomain.local username: myuser From 06aeabc5fb2739d2b07612246b517760ca676e11 Mon Sep 17 00:00:00 2001 From: Constantin Date: Fri, 5 Jun 2015 14:35:26 +0100 Subject: [PATCH 047/386] Added additional stat output values Included in the output as well: - file extension - file attributes - file owner - creation, last access and last write timestamps (in UNIX format) --- windows/win_stat.ps1 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 10101a62b30..51c9c827093 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -19,6 +19,11 @@ $params = Parse-Args $args; +function Date_To_Timestamp($start_date, $end_date) +{ + Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds +} + $path = Get-Attr $params "path" $FALSE; If ($path -eq $FALSE) { @@ -36,6 +41,7 @@ If (Test-Path $path) { Set-Attr $result.stat "exists" $TRUE; $info = Get-Item $path; + $epoch_date = Get-Date -Date "01/01/1970" If ($info.Directory) # Only files have the .Directory attribute. 
{ Set-Attr $result.stat "isdir" $FALSE; @@ -45,6 +51,12 @@ If (Test-Path $path) { Set-Attr $result.stat "isdir" $TRUE; } + Set-Attr $result.stat "extension" $info.Extension; + Set-Attr $result.stat "attributes" $info.Attributes.ToString(); + Set-Attr $result.stat "owner" $info.GetAccessControl().Owner; + Set-Attr $result.stat "creationtime" (Date_To_Timestamp $epoch_date $info.CreationTime); + Set-Attr $result.stat "lastaccesstime" (Date_To_Timestamp $epoch_date $info.LastAccessTime); + Set-Attr $result.stat "lastwritetime" (Date_To_Timestamp $epoch_date $info.LastWriteTime); } Else { From 361a1e1b65e65fff29a9fb8555e7559b54545e9e Mon Sep 17 00:00:00 2001 From: Igor Khomyakov Date: Fri, 9 Jan 2015 16:57:20 +0300 Subject: [PATCH 048/386] Check if a service exists --- web_infrastructure/supervisorctl.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..f0cfa691c27 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -30,7 +30,7 @@ version_added: "0.7" options: name: description: - - The name of the supervisord program or group to manage. + - The name of the supervisord program or group to manage. - The name will be taken as group name when it ends with a colon I(:) - Group support is only available in Ansible version 1.6 or later. 
required: true @@ -192,9 +192,14 @@ def main(): if state == 'restarted': rc, out, err = run_supervisorctl('update') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: True, 'restart', 'started') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") if state == 'present': if len(processes) > 0: From 9074aa4c937c2618136e703703118cbda88030bf Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Mon, 8 Jun 2015 14:45:20 +0300 Subject: [PATCH 049/386] win_get_url re-download file only if modified --- windows/win_get_url.ps1 | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index b555cc7a52c..96189d69113 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -1,7 +1,7 @@ #!powershell # This file is part of Ansible. 
# -# Copyright 2014, Paul Durivage +# (c)) 2015, Paul Durivage , Tal Auslander # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -40,14 +40,38 @@ Else { Fail-Json $result "missing required argument: dest" } -$client = New-Object System.Net.WebClient +If (-not (Test-Path $dest)) { + $client = New-Object System.Net.WebClient -Try { - $client.DownloadFile($url, $dest) - $result.changed = $true + Try { + $client.DownloadFile($url, $dest) + $result.changed = $true + } + Catch { + Fail-Json $result "Error downloading $url to $dest" + } } -Catch { - Fail-Json $result "Error downloading $url to $dest" +Else { + Try { + $webRequest = [System.Net.HttpWebRequest]::Create($url) + $webRequest.IfModifiedSince = ([System.IO.FileInfo]$dest).LastWriteTime + $webRequest.Method = "GET" + [System.Net.HttpWebResponse]$webResponse = $webRequest.GetResponse() + + $stream = New-Object System.IO.StreamReader($response.GetResponseStream()) + + $stream.ReadToEnd() | Set-Content -Path $dest -Force + + $result.changed = $true + } + Catch [System.Net.WebException] { + If ($_.Exception.Response.StatusCode -ne [System.Net.HttpStatusCode]::NotModified) { + Fail-Json $result "Error downloading $url to $dest" + } + } + Catch { + Fail-Json $result "Error downloading $url to $dest" + } } Set-Attr $result.win_get_url "url" $url From 1730764531f07d028a9c4e96f20f1c03a1949606 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Tue, 9 Jun 2015 11:14:44 +0300 Subject: [PATCH 050/386] add force parameter to win_get_url --- windows/win_get_url.ps1 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 96189d69113..e5e1ea73c83 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,7 +40,9 @@ Else { Fail-Json $result "missing required argument: dest" } -If (-not (Test-Path $dest)) { +$force = Get-Attr -obj $params -name "force" "no" + 
+If ($force -eq "yes" -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient Try { From 558f2ace1f3448dd50c17d38de9a50f5850c050a Mon Sep 17 00:00:00 2001 From: Ed Hein Date: Fri, 12 Jun 2015 12:36:52 +0200 Subject: [PATCH 051/386] Fix computation of port bindings. Port bindings configuration can be a list if several host ports are bound to the same guest port. --- cloud/docker/docker.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..b04b6ee335a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1041,15 +1041,14 @@ class DockerManager(object): for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): container_port = "{0}/tcp".format(container_port) - bind = {} if len(config) == 1: - bind['HostIp'] = "0.0.0.0" - bind['HostPort'] = "" + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) else: - bind['HostIp'] = config[0] - bind['HostPort'] = str(config[1]) - - expected_bound_ports[container_port] = [bind] + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] actual_bound_ports = container['HostConfig']['PortBindings'] or {} From ff0fc73d64cd2467246435097bf25416e4e1cc7e Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 052/386] Add OpenStack Floating IP Module Also deprecate the two old quantum floating ip modules. 
--- ...floating_ip.py => _quantum_floating_ip.py} | 1 + ...e.py => _quantum_floating_ip_associate.py} | 1 + cloud/openstack/os_floating_ip.py | 245 ++++++++++++++++++ 3 files changed, 247 insertions(+) rename cloud/openstack/{quantum_floating_ip.py => _quantum_floating_ip.py} (99%) rename cloud/openstack/{quantum_floating_ip_associate.py => _quantum_floating_ip_associate.py} (99%) create mode 100644 cloud/openstack/os_floating_ip.py diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py similarity index 99% rename from cloud/openstack/quantum_floating_ip.py rename to cloud/openstack/_quantum_floating_ip.py index b7599da0725..5220d307844 100644 --- a/cloud/openstack/quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -36,6 +36,7 @@ version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Brad P. Crochet (@bcrochet)" +deprecated: Deprecated in 2.0. Use os_floating_ip instead short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py similarity index 99% rename from cloud/openstack/quantum_floating_ip_associate.py rename to cloud/openstack/_quantum_floating_ip_associate.py index a5f39dec133..8960e247b0f 100644 --- a/cloud/openstack/quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -33,6 +33,7 @@ DOCUMENTATION = ''' module: quantum_floating_ip_associate version_added: "1.2" author: "Benno Joy (@bennojoy)" +deprecated: Deprecated in 2.0. 
Use os_floating_ip instead short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py new file mode 100644 index 00000000000..2d939a9bcd7 --- /dev/null +++ b/cloud/openstack/os_floating_ip.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_floating_ip +version_added: "2.0" +short_description: Add/Remove floating IP from an instance +extends_documentation_fragment: openstack +description: + - Add or Remove a floating IP to an instance +options: + server: + description: + - The name or ID of the instance to which the IP address + should be assigned. + required: true + network_name: + description: + - Name of the network from which IP has to be assigned to VM. + Please make sure the network is an external network. + - Required if ip_address is not given. + required: true + default: None + internal_network_name: + description: + - Name of the network of the port to associate with the floating ip. 
+ Necessary when VM multiple networks. + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + required: false + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Assign a floating ip to the instance from an external network +- os_floating_ip: + cloud: mordred + state: present + server: vm1 + network_name: external_network + internal_network_name: internal_network +''' + + +def _get_server_state(module, cloud): + info = None + server = cloud.get_server(module.params['server']) + if server: + info = server._info + status = info['status'] + if status != 'ACTIVE' and module.params['state'] == 'present': + module.fail_json( + msg="The VM is available but not Active. State: %s" % status + ) + return info, server + + +def _get_port_info(neutron, module, instance_id, internal_network_name=None): + subnet_id = None + if internal_network_name: + kwargs = {'name': internal_network_name} + networks = neutron.list_networks(**kwargs) + network_id = networks['networks'][0]['id'] + kwargs = { + 'network_id': network_id, + 'ip_version': 4 + } + subnets = neutron.list_subnets(**kwargs) + subnet_id = subnets['subnets'][0]['id'] + + kwargs = { + 'device_id': instance_id, + } + try: + ports = neutron.list_ports(**kwargs) + except Exception, e: + module.fail_json(msg="Error in listing ports: %s" % e.message) + + if subnet_id: + port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) + port_id = port['id'] + fixed_ip_address = port['fixed_ips'][0]['ip_address'] + else: + port_id = ports['ports'][0]['id'] + fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] + + if not ports['ports']: + return None, None + return fixed_ip_address, port_id + + +def _get_floating_ip(neutron, module, fixed_ip_address): + kwargs = { + 'fixed_ip_address': fixed_ip_address + } + try: + ips = neutron.list_floatingips(**kwargs) + except Exception, e: + 
module.fail_json( + msg="Error in fetching the floatingips's %s" % e.message + ) + + if not ips['floatingips']: + return None, None + + return (ips['floatingips'][0]['id'], + ips['floatingips'][0]['floating_ip_address']) + + +def _create_and_associate_floating_ip(neutron, module, port_id, + net_id, fixed_ip): + kwargs = { + 'port_id': port_id, + 'floating_network_id': net_id, + 'fixed_ip_address': fixed_ip + } + + try: + result = neutron.create_floatingip({'floatingip': kwargs}) + except Exception, e: + module.fail_json( + msg="Error in updating the floating ip address: %s" % e.message + ) + + module.exit_json( + changed=True, + result=result, + public_ip=result['floatingip']['floating_ip_address'] + ) + + +def _get_public_net_id(neutron, module): + kwargs = { + 'name': module.params['network_name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception, e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + + +def _update_floating_ip(neutron, module, port_id, floating_ip_id): + kwargs = { + 'port_id': port_id + } + try: + result = neutron.update_floatingip(floating_ip_id, + {'floatingip': kwargs}) + except Exception, e: + module.fail_json( + msg="Error in updating the floating ip address: %s" % e.message + ) + module.exit_json(changed=True, result=result) + + +def main(): + argument_spec = openstack_full_argument_spec( + server = dict(required=True), + network_name = dict(required=True), + internal_network_name = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + internal_network_name = module.params['internal_network_name'] + + try: + cloud = shade.openstack_cloud(**module.params) + 
neutron = cloud.neutron_client + + server_info, server_obj = _get_server_state(module, cloud) + if not server_info: + module.fail_json(msg="The server provided cannot be found") + + fixed_ip, port_id = _get_port_info( + neutron, module, server_info['id'], internal_network_name) + if not port_id: + module.fail_json(msg="Cannot find a port for this instance," + " maybe fixed ip is not assigned") + + floating_id, floating_ip = _get_floating_ip(neutron, module, fixed_ip) + + if state == 'present': + if floating_ip: + # This server already has a floating IP assigned + module.exit_json(changed=False, public_ip=floating_ip) + + pub_net_id = _get_public_net_id(neutron, module) + if not pub_net_id: + module.fail_json( + msg="Cannot find the public network specified" + ) + _create_and_associate_floating_ip(neutron, module, port_id, + pub_net_id, fixed_ip) + + elif state == 'absent': + if floating_ip: + _update_floating_ip(neutron, module, None, floating_id) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From dadc1faebd9a177f66f39830d4c65efe9d559870 Mon Sep 17 00:00:00 2001 From: Konstantin Gribov Date: Tue, 2 Jun 2015 16:14:07 +0300 Subject: [PATCH 053/386] Escape spaces, backslashes and ampersands in fstab Fixes #530. It's more generic than #578 which only fixes spaces escaping in name (target dir to mount). Escaping is used in both `set_mount` (important for `src`, `name` and `opts`) and `unset_mount` (for `name`). It's shouldn't be used in `mount` and `umount` since `name` parameter is passed as array element to `module.run_command`. 
Signed-off-by: Konstantin Gribov --- system/mount.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/system/mount.py b/system/mount.py index e11d497220b..d41d1f936e2 100644 --- a/system/mount.py +++ b/system/mount.py @@ -102,6 +102,10 @@ def write_fstab(lines, dest): fs_w.flush() fs_w.close() +def _escape_fstab(v): + """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ + return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') + def set_mount(**kwargs): """ set/change a mount point location in fstab """ @@ -119,6 +123,7 @@ def set_mount(**kwargs): to_write = [] exists = False changed = False + escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()]) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -135,16 +140,16 @@ def set_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_args['name']: to_write.append(line) continue # it exists - now see if what we have is different exists = True for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: + if ld[t] != escaped_args[t]: changed = True - ld[t] = args[t] + ld[t] = escaped_args[t] if changed: to_write.append(new_line % ld) @@ -175,6 +180,7 @@ def unset_mount(**kwargs): to_write = [] changed = False + escaped_name = _escape_fstab(args['name']) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -191,7 +197,7 @@ def unset_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_name: to_write.append(line) continue @@ -260,8 +266,6 @@ def main(): args['passno'] = module.params['passno'] if module.params['opts'] is not None: args['opts'] = module.params['opts'] - if 
' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") if module.params['dump'] is not None: args['dump'] = module.params['dump'] if module.params['fstab'] is not None: From 5b84b0d136c75ca964d1fcbb8ddc2359a98f9cfe Mon Sep 17 00:00:00 2001 From: acaveroc Date: Wed, 17 Jun 2015 10:37:47 +0200 Subject: [PATCH 054/386] Add port definition support for mysql_vars module --- database/mysql/mysql_variables.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 0b0face0328..753d37433e3 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -52,6 +52,10 @@ options: description: - mysql host to connect required: False + login_port: + description: + - mysql port to connect + required: False login_unix_socket: description: - unix socket to connect mysql server @@ -194,6 +198,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), + login_port=dict(default="3306"), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None) @@ -203,6 +208,7 @@ def main(): user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] + port = module.params["login_port"] mysqlvar = module.params["variable"] value = module.params["value"] if not mysqldb_found: @@ -227,9 +233,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, 
passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From 7d2a5965bd5fc95196a3d3427acaa5ca086e4e80 Mon Sep 17 00:00:00 2001 From: acaveroc Date: Wed, 17 Jun 2015 13:53:08 +0200 Subject: [PATCH 055/386] Assorted minor bug fixes - Modified data type for port definition from string to integer - Modified login_host default value for compatibilize with port definition according with MySQL Documentation (https://dev.mysql.com/doc/refman/5.0/en/connecting.html) --- database/mysql/mysql_variables.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 753d37433e3..36415df5460 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -197,7 +197,7 @@ def main(): argument_spec = dict( login_user=dict(default=None), login_password=dict(default=None), - login_host=dict(default="localhost"), + login_host=dict(default="127.0.0.1"), login_port=dict(default="3306"), login_unix_socket=dict(default=None), variable=dict(default=None), @@ -233,9 +233,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], 
port=int(module.params["login_port"]), unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From e7876df99f7e68467c0c6da0939fb5ba07f9ee14 Mon Sep 17 00:00:00 2001 From: acaveroc Date: Thu, 18 Jun 2015 09:43:32 +0200 Subject: [PATCH 056/386] Add version_added and type of parameter --- database/mysql/mysql_variables.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 36415df5460..f50ed740539 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -53,6 +53,7 @@ options: - mysql host to connect required: False login_port: + version_added: "1.9" description: - mysql port to connect required: False @@ -198,7 +199,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="127.0.0.1"), - login_port=dict(default="3306"), + login_port=dict(default="3306", type='int'), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None) @@ -233,9 +234,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + 
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From 759c2de7f98b3bf0979cafb804df982d27dcf5fd Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:56:50 -0400 Subject: [PATCH 057/386] Add filter ability --- cloud/openstack/os_client_config.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 100608b0fd0..a12cd8fe65a 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -25,6 +25,15 @@ short_description: Get OpenStack Client config description: - Get I(openstack) client config data from clouds.yaml or environment version_added: "2.0" +notes: + - Facts are placed in the C(openstack.clouds) variable. +options: + clouds: + description: + - List of clouds to limit the return list to. 
No value means return + information on all configured clouds + required: false + default: [] requirements: [ os-client-config ] author: "Monty Taylor (@emonty)" ''' @@ -34,19 +43,27 @@ EXAMPLES = ''' - os-client-config: - debug: var={{ item }} with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" + +# Get the information back just about the mordred cloud +- os-client-config: + clouds: + - mordred ''' def main(): - module = AnsibleModule({}) + module = AnsibleModule({ + clouds=dict(required=False, default=[]), + }) p = module.params try: config = os_client_config.OpenStackConfig() clouds = [] for cloud in config.get_all_clouds(): - cloud.config['name'] = cloud.name - clouds.append(cloud.config) + if not module.params['clouds'] or cloud.name in module.param['clouds']: + cloud.config['name'] = cloud.name + clouds.append(cloud.config) module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) except exceptions.OpenStackConfigException as e: module.fail_json(msg=str(e)) From a226701efe836e3c288a1624dfd820928dcd0c16 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 058/386] Add OpenStack Keypair module Also deprecate old nova_keypair module. --- .../{nova_keypair.py => _nova_keypair.py} | 1 + cloud/openstack/os_keypair.py | 140 ++++++++++++++++++ 2 files changed, 141 insertions(+) rename cloud/openstack/{nova_keypair.py => _nova_keypair.py} (99%) create mode 100644 cloud/openstack/os_keypair.py diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 99% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py index b2e38ff7db9..68df0c5a2c4 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -32,6 +32,7 @@ version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" +deprecated: Deprecated in 2.0. 
Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py new file mode 100644 index 00000000000..c4725552725 --- /dev/null +++ b/cloud/openstack/os_keypair.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# Copyright (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_keypair +short_description: Add/Delete a keypair from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove key pair from OpenStack +options: + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected to vm's upon creation + required: false + default: None + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive with public_key + required: false + default: None + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Creates a key pair with the running users public key +- os_keypair: + cloud: mordred + state: present + name: ansible_key + public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + +# Creates a new key pair and the private key returned after the run. +- os_keypair: + cloud: rax-dfw + state: present + name: ansible_key +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + public_key = dict(default=None), + public_key_file = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[['public_key', 'public_key_file']]) + module = AnsibleModule(argument_spec, **module_kwargs) + + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + else: + public_key = module.params['public_key'] + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + public_key = module.params['public_key'] + + try: + cloud = shade.openstack_cloud(**module.params) + + if state == 'present': + for key in cloud.list_keypairs(): + if key.name == name: + if public_key and (public_key != key.public_key): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." 
% key.name + ) + else: + module.exit_json(changed=False, result="Key present") + try: + key = cloud.create_keypair(name, public_key) + except Exception, e: + module.exit_json( + msg="Error in creating the keypair: %s" % e.message + ) + if not public_key: + module.exit_json(changed=True, key=key.private_key) + module.exit_json(changed=True, key=None) + + elif state == 'absent': + for key in cloud.list_keypairs(): + if key.name == name: + try: + cloud.delete_keypair(name) + except Exception, e: + module.fail_json( + msg="Keypair deletion has failed: %s" % e.message + ) + module.exit_json(changed=True, result="deleted") + module.exit_json(changed=False, result="not present") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 82dc5c4394ab88e055debed6b0d7d397f11638d7 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 19:30:34 +0100 Subject: [PATCH 059/386] Avoind using lookup() in documentation lookup() is currently broken (current Ansible devel branch), so better to avoid it in our examples. --- cloud/openstack/os_keypair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c4725552725..c6794b47826 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -63,7 +63,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + public_key_file: ~/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: From 02d0a73906bcd6e1c8805825a23b49df027c65a9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:59:32 -0400 Subject: [PATCH 060/386] Move the order of argument processing --- cloud/openstack/os_keypair.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c6794b47826..b404e6cc02a 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -84,18 +84,16 @@ def main(): mutually_exclusive=[['public_key', 'public_key_file']]) module = AnsibleModule(argument_spec, **module_kwargs) - if module.params['public_key_file']: - public_key = open(module.params['public_key_file']).read() - else: - public_key = module.params['public_key'] - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') - state = module.params['state'] name = module.params['name'] public_key = module.params['public_key'] + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + try: cloud = shade.openstack_cloud(**module.params) From 3b4b065315072207537d01770a79584e2a01d0a4 Mon Sep 17 00:00:00 2001 From: Bryan Fleming Date: Wed, 6 May 2015 16:44:40 -0500 Subject: [PATCH 061/386] fixes #1120 - privileges using columns --- database/mysql/mysql_user.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 763e0e7ebd5..0ff290f1185 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -157,6 +157,7 @@ password=n<_665{vS43y import getpass import tempfile +import re try: import MySQLdb except ImportError: @@ -316,13 +317,19 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. 
""" output = {} + privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') dbpriv = pieces[0].rsplit(".", 1) pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) - - output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')] - new_privs = frozenset(output[pieces[0]]) + if '(' in pieces[1]: + output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) + for i in output[pieces[0]]: + privs.append(re.sub(r'\(.*\)','',i)) + else: + output[pieces[0]] = pieces[1].upper().split(',') + privs = output[pieces[0]] + new_privs = frozenset(privs) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) From cee4ef0fc3a6e4f21ec9787b2ce002aa4a05bd91 Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 10:45:12 -0700 Subject: [PATCH 062/386] Resolving secgroup.id issue in this module secgroup['id'] was not being returned in all cases where the specified security group exists. --- cloud/openstack/os_security_group.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 51e7df772a1..86e6de0b023 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,6 +48,8 @@ options: - Should the resource be present or absent. 
choices: [present, absent] default: present + +requirements: ["shade"] ''' EXAMPLES = ''' @@ -114,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) - changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - changed = True + module.exit_json(changed=True, id=secgroup['id']) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - changed = True - module.exit_json( - changed=changed, id=secgroup.id, secgroup=secgroup) + module.exit_json(changed=True, id=secgroup['id']) + else: + module.exit_json(changed=False, id=secgroup['id']) if state == 'absent': - if secgroup: + if not secgroup: + module.exit_json(changed=False) + else: cloud.delete_security_group(secgroup['id']) - changed=True - module.exit_json(changed=changed) + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 887b88ea73aaf1ed81fc15398b004674b15f3ec3 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:06:12 -0400 Subject: [PATCH 063/386] Make sure we're always returning objects too --- cloud/openstack/os_security_group.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 86e6de0b023..7fba28c8cb9 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -116,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) + changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, id=secgroup['id']) + changed = True else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - 
module.exit_json(changed=True, id=secgroup['id']) - else: - module.exit_json(changed=False, id=secgroup['id']) + changed = True + module.exit_json( + changed=changed, id=secgroup['id'], secgroup=secgroup) if state == 'absent': - if not secgroup: - module.exit_json(changed=False) - else: + if secgroup: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True) + changed = True + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 1ae299d00ff2fe18abd7b1d01b18f384301afccf Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:39:57 -0400 Subject: [PATCH 064/386] Remove duplicate shade requirement --- cloud/openstack/os_security_group.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 7fba28c8cb9..e42b7f938f5 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,8 +48,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - -requirements: ["shade"] ''' EXAMPLES = ''' From 84fd824f75ca30b0854634f6a99e7c0cdb90a029 Mon Sep 17 00:00:00 2001 From: murdochr Date: Sat, 20 Jun 2015 21:36:10 +0100 Subject: [PATCH 065/386] Change docs to reflect correct when syntax for matching variable strings as per MD's forum post as this fails with unhelpful error otherwise. 
https://groups.google.com/forum/#!topic/ansible-project/D2hQzZ_jNuM --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index b7fa8282c83..3de17c12d60 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -156,7 +156,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: 'AWESOME' not in "{{ webpage.content }}" + when: "'AWESOME' not in webpage.content" # Create a JIRA issue From 87404fa7987b182c1ccc05197656140174da54e2 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Tue, 30 Sep 2014 11:13:54 +0300 Subject: [PATCH 066/386] Hostname module should update ansible_hostname --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 882402a5e21..d9193641eb2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -509,6 +509,6 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name) + module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) main() From e93b5c672476b34fd81327c3976c92b74d57c0d7 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 22 Jun 2015 14:52:45 +0100 Subject: [PATCH 067/386] Parse out space characters in route53 value list Fixes: https://github.com/ansible/ansible-modules-core/issues/992 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 67700060d9f..d25be6b99ea 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -224,7 +224,7 @@ def main(): if type(value_in) is str: if value_in: - value_list = sorted(value_in.split(',')) + value_list = sorted([s.strip() for s in value_in.split(',')]) elif type(value_in) is list: value_list = sorted(value_in) From 617d5750a6be96d68e5412832a79c0fa8229ffbc Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Tue, 23 Jun 2015 
07:14:30 +0200 Subject: [PATCH 068/386] Added "EC2 instance" termination_protection and source_dest_check changeability at run-time --- cloud/amazon/ec2.py | 53 +++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 20d49ce5995..dc7d5d38dd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -190,6 +190,13 @@ options: required: false default: yes choices: [ "yes", "no" ] + termination_protection: + version_added: "2.0" + description: + - Enable or Disable the Termination Protection + required: false + default: no + choices: [ "yes", "no" ] state: version_added: "1.3" description: @@ -786,6 +793,7 @@ def create_instances(module, ec2, vpc, override_count=None): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) + termination_protection = module.boolean(module.params.get('termination_protection')) # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1014,11 +1022,16 @@ def create_instances(module, ec2, vpc, override_count=None): for res in res_list: running_instances.extend(res.instances) - # Enabled by default by Amazon - if not source_dest_check: + # Enabled by default by AWS + if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) + # Disabled by default by AWS + if termination_protection is True: + for inst in res.instances: + inst.modify_attribute('disableApiTermination', True) + # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: @@ -1135,21 +1148,32 @@ def startstop_instances(module, ec2, instance_ids, state): if not isinstance(instance_ids, list) or len(instance_ids) < 1: module.fail_json(msg='instance_ids should be a list of instances, aborting') - # Check that our instances are not in the state we want to 
take them to - # and change them to our desired state + # Check (and eventually change) instances attributes and instances state running_instances_array = [] for res in ec2.get_all_instances(instance_ids): for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True + + # Check "source_dest_check" attribute + if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + inst.modify_attribute('sourceDestCheck', source_dest_check) + changed = True + + # Check "termination_protection" attribute + if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection: + inst.modify_attribute('disableApiTermination', termination_protection) + changed = True + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + inst.stop() + except EC2ResponseError, e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True ## Wait for all the instances to finish starting or stopping wait_timeout = time.time() + wait_timeout @@ -1200,6 +1224,7 @@ def main(): instance_profile_name = dict(), instance_ids = dict(type='list', aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), + termination_protection = dict(type='bool', default=False), state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']), exact_count = dict(type='int', default=None), count_tag = dict(), From 6cdfbb72f01468192965e45ce6c019b2ea44ea65 Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 22 Jun 2015 17:13:42 +0200 Subject: [PATCH 069/386] Added some block_device_mapping (disks) informations to EC2 
instance module ouput --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index dc7d5d38dd3..ad2f8f8f71b 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -619,6 +619,19 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + bdm_dict = {} + bdm = getattr(inst, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + instance_info['block_device_mapping'] = bdm_dict + except AttributeError: + instance_info['block_device_mapping'] = False + try: instance_info['tenancy'] = getattr(inst, 'placement_tenancy') except AttributeError: From 207abb6f5c7e9d1d50dc52e0ee4cc04d192912fa Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Tue, 23 Jun 2015 14:08:43 -0400 Subject: [PATCH 070/386] Add ClassicLink settings to EC2_launchconfig --- cloud/amazon/ec2_lc.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 3c292377a58..6c5e2c1dd4c 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -116,6 +116,18 @@ options: default: false aliases: [] version_added: "1.8" + classic_link_vpc_id: + description: + - Id of ClassicLink enabled VPC + required: false + default: null + version_added: "2.0" + classic_link_vpc_security_groups" + description: + - A list of security group id’s with which to associate the ClassicLink VPC instances. 
+ required: false + default: null + version_added: "2.0" extends_documentation_fragment: aws """ @@ -184,6 +196,8 @@ def create_launch_config(connection, module): ramdisk_id = module.params.get('ramdisk_id') instance_profile_name = module.params.get('instance_profile_name') ebs_optimized = module.params.get('ebs_optimized') + classic_link_vpc_id = module.params.get('classic_link_vpc_id') + classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') bdm = BlockDeviceMapping() if volumes: @@ -206,10 +220,12 @@ def create_launch_config(connection, module): kernel_id=kernel_id, spot_price=spot_price, instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, + associate_public_ip_address=assign_public_ip, ramdisk_id=ramdisk_id, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, + classic_link_vpc_security_groups=classic_link_vpc_security_groups, + classic_link_vpc_id=classic_link_vpc_id, ) launch_configs = connection.get_all_launch_configurations(names=[name]) @@ -257,7 +273,9 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool') + assign_public_ip=dict(type='bool'), + classic_link_vpc_security_groups=dict(type='list'), + classic_link_vpc_id=dict(type='str') ) ) From c6f9e08282b7eefc2f7f2825df369d0099c2c3b2 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 13 Apr 2015 21:22:11 -0400 Subject: [PATCH 071/386] new vpc module. does not contain subnet or route table functionality. 
changed name to ec2_vpc_net refactored out IGW functionality --- cloud/amazon/ec2_vpc_net.py | 344 ++++++++++++++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_net.py diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py new file mode 100644 index 00000000000..33c711e7683 --- /dev/null +++ b/cloud/amazon/ec2_vpc_net.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net +short_description: configure AWS virtual private clouds +description: + - Creates or terminates AWS virtual private clouds. This module has a dependency on python-boto. +version_added: "2.0" +options: + name: + description: + - The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists. + required: yes + cidr_block: + description: + - The CIDR of the VPC + required: yes + aliases: [] + tenancy: + description: + - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. + required: false + default: default + dns_support: + description: + - Whether to enable AWS DNS support. + required: false + default: true + dns_hostnames: + description: + - Whether to enable AWS hostname support. 
+    required: false + default: true + dhcp_id: + description: + - the id of the DHCP options to use for this vpc + default: null + required: false + tags: + description: + - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different. + default: None + required: false + state: + description: + - The state of the VPC. Either absent or present. + default: present + required: false + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. + default: false + required: false +author: Jonathan Davila +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Create a VPC with dedicated tenancy and a couple of tags + +- ec2_vpc: + name: Module_dev2 + cidr_block: 170.10.0.0/16 + region: us-east-1 + tags: + new_vpc: ec2_vpc_module + this: works22 + tenancy: dedicated + +''' + + +import time +import sys + +try: + import boto + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError + + HAS_BOTO=True +except ImportError: + HAS_BOTO=False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + +def vpc_exists(module, vpc, name, cidr_block, multi): + """Returns True or False in regards to the existence of a VPC. When supplied + with a CIDR, it will check for matching tags to determine if it is a match + otherwise it will assume the VPC does not exist and thus return false. 
+ """ + exists=False + matched_vpc=None + + try: + matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if len(matching_vpcs) == 1 and not multi: + exists=True + matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0] + elif len(matching_vpcs) > 1 and not multi: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + 'CIDR block you specified. If you would like to create ' + 'the VPC anyways please pass True to the multi_ok param.' % len(matching_vpcs)) + + return exists, matched_vpc + +def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags): + """This returns True or False. Intended to run after vpc_exists. + It will check all the characteristics of the parameters passed and compare them + to the active VPC. If any discrepancy is found, it will report true, meaning that + the VPC needs to be update in order to match the specified state in the params. + """ + + update_dhcp=False + update_tags=False + dhcp_match=False + + try: + dhcp_list=vpc.get_all_dhcp_options() + + if dhcp_id is not None: + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + for opts in dhcp_list: + if (str(opts).split(':')[1] == dhcp_id) or has_default: + dhcp_match=True + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_match or (has_default and dhcp_id != 'default'): + update_dhcp=True + + if dns_hostnames and dns_support == False: + module.fail_json('In order to enable DNS Hostnames you must have DNS support enabled') + else: + + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. 
+ try: + vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support) + vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if tags: + try: + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + if not set(tags.items()).issubset(set(current_tags.items())): + update_tags=True + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return update_dhcp, update_tags + + +def update_vpc_tags(module, vpc, vpc_id, tags, name): + tags.update({'Name': name}) + try: + vpc.create_tags(vpc_id, tags) + updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return updated_tags + + +def update_dhcp_opts(module, vpc, vpc_id, dhcp_id): + try: + vpc.associate_dhcp_options(dhcp_id, vpc_id) + dhcp_list=vpc.get_all_dhcp_options() + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + for opts in dhcp_list: + vpc_dhcp=vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}) + matched=False + if opts == dhcp_id: + matched=True + return opts + + if matched == False: + return dhcp_id + +def main(): + argument_spec=ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str', default=None, required=True), + cidr_block=dict(type='str', default=None, required=True), + tenancy=dict(choices=['default', 'dedicated'], default='default'), + dns_support=dict(type='bool', default=True), + dns_hostnames=dict(type='bool', default=True), + dhcp_opts_id=dict(type='str', default=None, required=False), + tags=dict(type='dict', required=False, default=None), + state=dict(choices=['present', 'absent'], default='present'), + region=dict(type='str', required=True), + multi_ok=dict(type='bool', default=False) + ) + ) + + module = AnsibleModule( + 
argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='Boto is required for this module') + + name=module.params.get('name') + cidr_block=module.params.get('cidr_block') + tenancy=module.params.get('tenancy') + dns_support=module.params.get('dns_support') + dns_hostnames=module.params.get('dns_hostnames') + dhcp_id=module.params.get('dhcp_opts_id') + tags=module.params.get('tags') + state=module.params.get('state') + multi=module.params.get('multi_ok') + + changed=False + new_dhcp_opts=None + new_tags=None + update_dhcp=False + update_tags=False + + region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module) + + try: + vpc=boto.vpc.connect_to_region( + region, + **aws_connect_kwargs + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + already_exists, vpc_id=vpc_exists(module, vpc, name, cidr_block, multi) + + if already_exists: + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + if update_dhcp or update_tags: + changed=True + + try: + e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + dhcp_list=vpc.get_all_dhcp_options() + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + dhcp_opts=None + + try: + for opts in dhcp_list: + if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}): + dhcp_opts=opts + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_opts and has_default: + dhcp_opts='default' + + if state == 'present': + + if not changed and already_exists: + module.exit_json(changed=changed, vpc_id=vpc_id) + elif changed: + if update_dhcp: + dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + + 
module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags) + + if not already_exists: + try: + vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1] + vpc.create_tags(vpc_id, dict(Name=name)) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + + if update_dhcp: + new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags) + elif state == 'absent': + if already_exists: + changed=True + try: + vpc.delete_vpc(vpc_id) + module.exit_json(changed=changed, vpc_id=vpc_id) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " + "and/or ec2_vpc_rt modules to ensure the other components are absent." 
% e_msg) + else: + module.exit_json(msg="VPC is absent") +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 559ad374f573c0dda4c5ecb4cbc7d19a731e9524 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Fri, 19 Jun 2015 17:06:51 +0200 Subject: [PATCH 072/386] Add the option to pass a string as policy --- cloud/amazon/iam_policy.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..32a25ae2517 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -40,7 +40,12 @@ options: aliases: [] policy_document: description: - - The path to the properly json formatted policy file + - The path to the properly json formatted policy file (mutually exclusive with C(policy_json)) + required: false + aliases: [] + policy_json: + description: + - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly) required: false aliases: [] state: @@ -109,6 +114,19 @@ task: state: present with_items: new_groups.results +# Create a new S3 policy with prefix per user +tasks: +- name: Create S3 policy from template + iam_policy: + iam_type: user + iam_name: "{{ item.user }}" + policy_name: "s3_limited_access_{{ item.s3_user_prefix }}" + state: present + policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " + with_items: + - user: s3_user + prefix: s3_user_prefix + ''' import json import urllib @@ -271,6 +289,7 @@ def main(): iam_name=dict(default=None, required=False), policy_name=dict(default=None, required=True), policy_document=dict(default=None, required=False), + policy_json=dict(type='str', default=None, required=False), skip_duplicates=dict(type='bool', default=True, required=False) )) @@ -284,10 +303,19 @@ def main(): name = 
module.params.get('iam_name') policy_name = module.params.get('policy_name') skip = module.params.get('skip_duplicates') + + if module.params.get('policy_document') != None and module.params.get('policy_json') != None: + module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set') + if module.params.get('policy_document') != None: with open(module.params.get('policy_document'), 'r') as json_data: pdoc = json.dumps(json.load(json_data)) json_data.close() + elif module.params.get('policy_json') != None: + try: + pdoc = json.dumps(json.loads(module.params.get('policy_json'))) + except Exception as e: + module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json')) else: pdoc=None From 73d5a8a63a9f250da0c867fb2efb927b3b91c183 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Wed, 24 Jun 2015 11:05:37 -0400 Subject: [PATCH 073/386] Fixing typo --- cloud/amazon/ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 6c5e2c1dd4c..818e8efbb50 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -122,7 +122,7 @@ options: required: false default: null version_added: "2.0" - classic_link_vpc_security_groups" + classic_link_vpc_security_groups: description: - A list of security group id’s with which to associate the ClassicLink VPC instances. required: false From 00322c43fc7095e926fff25837343cb700b6a9a2 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Fri, 14 Nov 2014 16:07:29 -0800 Subject: [PATCH 074/386] Add support for listing keys in a specific S3 bucket Includes support for specifying a prefix, marker, and/or max_keys. Returns a list of key names (as strings). 
--- cloud/amazon/s3.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 9bec312294a..ecf35d00f5d 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -64,7 +64,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). required: true default: null aliases: [] @@ -129,6 +129,12 @@ EXAMPLES = ''' # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' +# List keys simple +- s3: bucket=mybucket mode=list + +# List keys all options +- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 + # Create an empty bucket - s3: bucket=mybucket mode=create @@ -204,6 +210,19 @@ def create_bucket(module, s3, bucket, location=None): if bucket: return True +def get_bucket(module, s3, bucket): + try: + return s3.lookup(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def list_keys(module, bucket_object, prefix, marker, max_keys): + all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) + + keys = map((lambda x: x.key), all_keys) + + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def delete_bucket(module, s3, bucket): try: bucket = s3.lookup(bucket) @@ -329,11 +348,14 @@ def main(): dest = dict(default=None), encrypt = dict(default=True, type='bool'), expiry = dict(default=600, 
aliases=['expiration']), + marker = dict(default=None), + max_keys = dict(default=1000), metadata = dict(type='dict'), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), + prefix = dict(default=None), retries = dict(aliases=['retry'], type='int', default=0), s3_url = dict(aliases=['S3_URL']), src = dict(), @@ -349,11 +371,14 @@ def main(): expiry = int(module.params['expiry']) if module.params.get('dest'): dest = os.path.expanduser(module.params.get('dest')) + marker = module.params.get('marker') + max_keys = module.params.get('max_keys') metadata = module.params.get('metadata') mode = module.params.get('mode') obj = module.params.get('object') version = module.params.get('version') overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') retries = module.params.get('retries') s3_url = module.params.get('s3_url') src = module.params.get('src') @@ -537,6 +562,16 @@ def main(): else: module.fail_json(msg="Bucket parameter is required.", failed=True) + # Support for listing a set of keys + if mode == 'list': + bucket_object = get_bucket(module, s3, bucket) + + # If the bucket does not exist then bail out + if bucket_object is None: + module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True) + + list_keys(module, bucket_object, prefix, marker, max_keys) + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. 
if mode == 'create': From bed420cd531c30c0865bf331c74035494b612a1e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 25 Jun 2015 12:19:20 -0400 Subject: [PATCH 075/386] Update os_keypair for latest shade Uses the latest version of shade for cleaner code. Also, always return the key dict whether we create the key, or it already exists. The example using public_key_file is corrected to use a full path since ~ is not converted for us. --- cloud/openstack/os_keypair.py | 80 +++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index b404e6cc02a..a9c2640628f 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -41,12 +41,14 @@ options: default: None public_key: description: - - The public key that would be uploaded to nova and injected to vm's upon creation + - The public key that would be uploaded to nova and injected into VMs + upon creation. required: false default: None public_key_file: description: - - Path to local file containing ssh public key. Mutually exclusive with public_key + - Path to local file containing ssh public key. Mutually exclusive + with public_key. required: false default: None state: @@ -63,7 +65,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key_file: ~/.ssh/id_rsa.pub + public_key_file: /home/me/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: @@ -73,16 +75,33 @@ EXAMPLES = ''' ''' +def _system_state_change(module, keypair): + state = module.params['state'] + if state == 'present' and not keypair: + return True + if state == 'absent' and keypair: + return True + return False + + def main(): argument_spec = openstack_full_argument_spec( name = dict(required=True), public_key = dict(default=None), public_key_file = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + state = dict(default='present', + choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs( mutually_exclusive=[['public_key', 'public_key_file']]) - module = AnsibleModule(argument_spec, **module_kwargs) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') state = module.params['state'] name = module.params['name'] @@ -90,44 +109,33 @@ def main(): if module.params['public_key_file']: public_key = open(module.params['public_key_file']).read() - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') + public_key = public_key.rstrip() try: cloud = shade.openstack_cloud(**module.params) + keypair = cloud.get_keypair(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, keypair)) if state == 'present': - for key in cloud.list_keypairs(): - if key.name == name: - if public_key and (public_key != key.public_key): - module.fail_json( - msg="Key name %s present but key hash not the same" - " as offered. Delete key first." 
% key.name - ) - else: - module.exit_json(changed=False, result="Key present") - try: - key = cloud.create_keypair(name, public_key) - except Exception, e: - module.exit_json( - msg="Error in creating the keypair: %s" % e.message - ) - if not public_key: - module.exit_json(changed=True, key=key.private_key) - module.exit_json(changed=True, key=None) + if keypair and keypair['name'] == name: + if public_key and (public_key != keypair['public_key']): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." % name + ) + else: + module.exit_json(changed=False, key=keypair) + + new_key = cloud.create_keypair(name, public_key) + module.exit_json(changed=True, key=new_key) elif state == 'absent': - for key in cloud.list_keypairs(): - if key.name == name: - try: - cloud.delete_keypair(name) - except Exception, e: - module.fail_json( - msg="Keypair deletion has failed: %s" % e.message - ) - module.exit_json(changed=True, result="deleted") - module.exit_json(changed=False, result="not present") + if keypair: + cloud.delete_keypair(name) + module.exit_json(changed=True) + module.exit_json(changed=False) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 8ba96aaf4bb5c7e3534408be693ead01c4c49027 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:36:07 -0700 Subject: [PATCH 076/386] update documentation, adding new params --- cloud/amazon/s3.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index ecf35d00f5d..4edac74366b 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -56,6 +56,18 @@ options: required: false default: 600 aliases: [] + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. 
+ required: false + default: null + version_added: "2.0" + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + required: false + default: 1000 + version_added: "2.0" metadata: description: - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. @@ -64,7 +76,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object). required: true default: null aliases: [] @@ -73,6 +85,12 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode + required: false + default: null + version_added: "2.0" version: description: - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. 
From e90d02c35cfacda523eebdecdac14cc3194dc04d Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:37:17 -0700 Subject: [PATCH 077/386] iterate through all keys in a more pythonic manner --- cloud/amazon/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 4edac74366b..8c5221e3c1f 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -237,7 +237,7 @@ def get_bucket(module, s3, bucket): def list_keys(module, bucket_object, prefix, marker, max_keys): all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) - keys = map((lambda x: x.key), all_keys) + keys = [x.key for x in all_keys] module.exit_json(msg="LIST operation complete", s3_keys=keys) From d435d5ce0ae2597fdde4600dd07edbb8c9c4fdfe Mon Sep 17 00:00:00 2001 From: verm666 Date: Thu, 25 Jun 2015 10:56:29 -0700 Subject: [PATCH 078/386] This change is in response to issue #133. The original problem is: apt_repository.py connect to launchpad on every playbook run. In this patch apt_repository.py checks if required repository already exists or not. If no - paa will be added, if yes - just skip actions. 
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
If C(no), the file will only be transferred + if the destination does not exist. + required: false + choices: [ "yes", "no" ] + default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." requirements: [] From 29e4a127e19fee326c5c698d249f6b9791b9e705 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 25 Jun 2015 17:11:38 -0500 Subject: [PATCH 080/386] Default net to 'bridge' in container diff This prevents an unnecessary reload when the `net` parameter is unspecified (i.e. almost always). --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 9986c94f9ec..a6090c4b0c1 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1108,8 +1108,8 @@ class DockerManager(object): # NETWORK MODE - expected_netmode = self.module.params.get('net') or '' - actual_netmode = container['HostConfig']['NetworkMode'] + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' if actual_netmode != expected_netmode: self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) From dc3161dfdd24c2a43806e4783c9c86f8fd6b72f0 Mon Sep 17 00:00:00 2001 From: Simon Olofsson Date: Thu, 20 Nov 2014 00:21:01 +0100 Subject: [PATCH 081/386] Add option docker_user for docker module. docker_user can be used to specify the user or UID to use within the container. --- cloud/docker/docker.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 9986c94f9ec..e723a14f0fb 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -160,6 +160,13 @@ options: specified by docker-py. 
default: docker-py default remote API version version_added: "1.8" + docker_user: + description: + - Username or UID to use within the container + required: false + default: + aliases: [] + version_added: "2.0" username: description: - Remote API username. @@ -1303,6 +1310,7 @@ class DockerManager(object): 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), 'host_config': self.create_host_config(), + 'user': self.module.params.get('docker_user'), } def do_create(count, params): @@ -1495,6 +1503,7 @@ def main(): tls_ca_cert = dict(required=False, default=None, type='str'), tls_hostname = dict(required=False, type='str', default=None), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), + docker_user = dict(default=None), username = dict(default=None), password = dict(), email = dict(), From e3d9b51cbb954a43f138ea02a9f9311ff7555ca9 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Fri, 26 Jun 2015 12:53:20 +0100 Subject: [PATCH 082/386] Update os_floating_ip with new shade methods --- cloud/openstack/os_floating_ip.py | 283 +++++++++++++----------------- 1 file changed, 118 insertions(+), 165 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 2d939a9bcd7..9755b1d4159 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -1,8 +1,6 @@ #!/usr/bin/python -# coding: utf-8 -*- - -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2013, Benno Joy +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Author: Davide Guerri # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -17,9 +15,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
default: false + timeout: + description: + - Time to wait for an IP address to appear as attached. See wait. + required: false + default: 60
If a free floating IP is already allocated to +# the project, it is reused; if not, a new one is created. - os_floating_ip: - cloud: mordred + cloud: dguerri + server: cattle001 + +# Assign a new floating IP to the instance fixed ip `192.0.2.3` of +# `cattle001`. A new floating IP from the external network (or nova pool) +# ext_net is created. +- os_floating_ip: + cloud: dguerri state: present - server: vm1 - network_name: external_network - internal_network_name: internal_network + reuse: false + server: cattle001 + network: ext_net + fixed_address: 192.0.2.3 + wait: true + timeout: 180 + +# Detach a floating IP address from a server +- os_floating_ip: + cloud: dguerri + state: absent + floating_ip_address: 203.0.113.2 + server: cattle001 ''' -def _get_server_state(module, cloud): - info = None - server = cloud.get_server(module.params['server']) - if server: - info = server._info - status = info['status'] - if status != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json( - msg="The VM is available but not Active. 
State: %s" % status - ) - return info, server - - -def _get_port_info(neutron, module, instance_id, internal_network_name=None): - subnet_id = None - if internal_network_name: - kwargs = {'name': internal_network_name} - networks = neutron.list_networks(**kwargs) - network_id = networks['networks'][0]['id'] - kwargs = { - 'network_id': network_id, - 'ip_version': 4 - } - subnets = neutron.list_subnets(**kwargs) - subnet_id = subnets['subnets'][0]['id'] - - kwargs = { - 'device_id': instance_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json(msg="Error in listing ports: %s" % e.message) - - if subnet_id: - port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) - port_id = port['id'] - fixed_ip_address = port['fixed_ips'][0]['ip_address'] - else: - port_id = ports['ports'][0]['id'] - fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] - - if not ports['ports']: - return None, None - return fixed_ip_address, port_id - - -def _get_floating_ip(neutron, module, fixed_ip_address): - kwargs = { - 'fixed_ip_address': fixed_ip_address - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json( - msg="Error in fetching the floatingips's %s" % e.message - ) - - if not ips['floatingips']: - return None, None - - return (ips['floatingips'][0]['id'], - ips['floatingips'][0]['floating_ip_address']) - - -def _create_and_associate_floating_ip(neutron, module, port_id, - net_id, fixed_ip): - kwargs = { - 'port_id': port_id, - 'floating_network_id': net_id, - 'fixed_ip_address': fixed_ip - } - - try: - result = neutron.create_floatingip({'floatingip': kwargs}) - except Exception, e: - module.fail_json( - msg="Error in updating the floating ip address: %s" % e.message - ) - - module.exit_json( - changed=True, - result=result, - public_ip=result['floatingip']['floating_ip_address'] - ) - - -def _get_public_net_id(neutron, module): - kwargs = { - 
'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: +def _get_floating_ip(cloud, floating_ip_address): + f_ips = cloud.search_floating_ips( + filters={'floating_ip_address': floating_ip_address}) + if not f_ips: return None - return networks['networks'][0]['id'] - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, - {'floatingip': kwargs}) - except Exception, e: - module.fail_json( - msg="Error in updating the floating ip address: %s" % e.message - ) - module.exit_json(changed=True, result=result) + return f_ips[0] def main(): argument_spec = openstack_full_argument_spec( - server = dict(required=True), - network_name = dict(required=True), - internal_network_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + server=dict(required=True), + state=dict(default='present', choices=['absent', 'present']), + network=dict(required=False), + floating_ip_address=dict(required=False), + reuse=dict(required=False, type='bool', default=False), + fixed_address=dict(required=False), + wait=dict(required=False, type='bool', default=False), + timeout=dict(required=False, type='int', default=60), ) module_kwargs = openstack_module_kwargs() @@ -199,47 +136,63 @@ def main(): if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + server_name_or_id = module.params['server'] state = module.params['state'] - internal_network_name = module.params['internal_network_name'] + network = module.params['network'] + floating_ip_address = module.params['floating_ip_address'] + reuse = module.params['reuse'] + fixed_address = module.params['fixed_address'] + wait = module.params['wait'] + timeout = module.params['timeout'] + + cloud = 
shade.openstack_cloud(**module.params) try: - cloud = shade.openstack_cloud(**module.params) - neutron = cloud.neutron_client - - server_info, server_obj = _get_server_state(module, cloud) - if not server_info: - module.fail_json(msg="The server provided cannot be found") - - fixed_ip, port_id = _get_port_info( - neutron, module, server_info['id'], internal_network_name) - if not port_id: - module.fail_json(msg="Cannot find a port for this instance," - " maybe fixed ip is not assigned") - - floating_id, floating_ip = _get_floating_ip(neutron, module, fixed_ip) + server = cloud.get_server(server_name_or_id) + if server is None: + module.fail_json( + msg="server {0} not found".format(server_name_or_id)) if state == 'present': - if floating_ip: - # This server already has a floating IP assigned - module.exit_json(changed=False, public_ip=floating_ip) + if floating_ip_address is None: + if reuse: + f_ip = cloud.available_floating_ip(network=network) + else: + f_ip = cloud.create_floating_ip(network=network) + else: + f_ip = _get_floating_ip(cloud, floating_ip_address) + if f_ip is None: + module.fail_json( + msg="floating IP {0} not found".format( + floating_ip_address)) - pub_net_id = _get_public_net_id(neutron, module) - if not pub_net_id: - module.fail_json( - msg="Cannot find the public network specified" - ) - _create_and_associate_floating_ip(neutron, module, port_id, - pub_net_id, fixed_ip) + cloud.attach_ip_to_server( + server_id=server['id'], floating_ip_id=f_ip['id'], + fixed_address=fixed_address, wait=wait, timeout=timeout) + # Update the floating IP status + f_ip = cloud.get_floating_ip(id=f_ip['id']) + module.exit_json(changed=True, floating_ip=f_ip) elif state == 'absent': - if floating_ip: - _update_floating_ip(neutron, module, None, floating_id) - module.exit_json(changed=False) + if floating_ip_address is None: + module.fail_json(msg="floating_ip_address is required") + + f_ip = _get_floating_ip(cloud, floating_ip_address) + + 
cloud.detach_ip_from_server( + server_id=server['id'], floating_ip_id=f_ip['id']) + # Update the floating IP status + f_ip = cloud.get_floating_ip(id=f_ip['id']) + module.exit_json(changed=True, floating_ip=f_ip) except shade.OpenStackCloudException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=e.message, extra_data=e.extra_data) + # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() + + +if __name__ == '__main__': + main() From 725a7b2f59a296467439edde5aab75dc9552e60d Mon Sep 17 00:00:00 2001 From: verm666 Date: Fri, 26 Jun 2015 05:49:59 -0700 Subject: [PATCH 083/386] unarchive: fix work with 0 bytes archives This change is in response to issue #1575 --- files/unarchive.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/files/unarchive.py b/files/unarchive.py index 8053991b63d..a3544253402 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -300,6 +300,16 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) + # skip working with 0 size archives + try: + if os.path.getsize(src) == 0: + res_args = { + 'changed': False + } + module.exit_json(**res_args) + except Exception, e: + module.fail_json(msg="Source '%s' not readable" % src) + # is dest OK to receive tar file? 
if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) From 304e187a52abd165afaf5c4dd88ac28b66bfc149 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Fri, 26 Jun 2015 14:53:48 +0100 Subject: [PATCH 084/386] Fix reuse argument documentation --- cloud/openstack/os_floating_ip.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 9755b1d4159..5bd29240a67 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -81,19 +81,19 @@ requirements: ["shade"] EXAMPLES = ''' # Assign a floating IP to the fist interface of `cattle001` from an exiting -# external network or nova pool. If a free floating IP is already allocated to -# the project, it is reused; if not, a new one is created. +# external network or nova pool. A new floating IP from the first available +# external network is allocated to the project. - os_floating_ip: cloud: dguerri server: cattle001 # Assign a new floating IP to the instance fixed ip `192.0.2.3` of -# `cattle001`. A new floating IP from the external network (or nova pool) -# ext_net is created. +# `cattle001`. If a free floating IP is already allocated to the project, it is +# reused; if not, a new one is created. 
- os_floating_ip: cloud: dguerri state: present - reuse: false + reuse: yes server: cattle001 network: ext_net fixed_address: 192.0.2.3 From a81dea2b17428127b507888fd0c2fad59c1aca1e Mon Sep 17 00:00:00 2001 From: "Roetman, Victor" Date: Fri, 26 Jun 2015 14:50:29 -0400 Subject: [PATCH 085/386] apache2_module documetation update requires a2enmod and a2dismod --- web_infrastructure/apache2_module.py | 1 + 1 file changed, 1 insertion(+) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index ec9a8985e60..cb43ba9b0eb 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -35,6 +35,7 @@ options: choices: ['present', 'absent'] default: present +requirements: ["a2enmod","a2dismod"] ''' EXAMPLES = ''' From d651b4169133ed8ef17d63d0418f733061fc1a6d Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Fri, 26 Jun 2015 15:39:08 -0700 Subject: [PATCH 086/386] return health of instances and counts --- cloud/amazon/ec2_elb_lb.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 566db2d329a..9d626a98194 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -384,9 +384,33 @@ class ElbManager(object): 'hosted_zone_name': check_elb.canonical_hosted_zone_name, 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy + 'app_cookie_policy': app_cookie_policy, + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0 } + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [ dict({ + "instance_id": instance_state.instance_id, + "reason_code": instance_state.reason_code, + "state": instance_state.state, + }) for instance_state in 
info['unknown_instance_state_count'] += 1
b/system/authorized_key.py @@ -34,7 +34,6 @@ options: - The username on the remote host whose authorized_keys file will be modified required: true default: null - aliases: [] key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) @@ -72,9 +71,11 @@ options: version_added: "1.4" exclusive: description: - - Whether to remove all other non-specified keys from the - authorized_keys file. Multiple keys can be specified in a single - key= string value by separating them by newlines. + - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys + can be specified in a single C(key) string value by separating them by newlines. + - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration + of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a + single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" From 2d3e93e55823d03891e1c6612e959ee785f17575 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:36:55 -0400 Subject: [PATCH 089/386] added doc to note that git the command line tool is required for this moduel to function fixes http://github.com/ansible/ansible/issues/11358 --- source_control/git.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 369430211f3..bc35c97da93 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -173,7 +173,8 @@ options: to be installed. The commit MUST be signed and the public key MUST be trusted in the GPG trustdb. - +requirements: + - git (the command line tool) notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. 
To avoid this prompt, From 5e82f7e11e0ac7cc7cdaeffb0787209afae79fb0 Mon Sep 17 00:00:00 2001 From: ToBeReplaced Date: Sat, 27 Jun 2015 11:41:01 -0600 Subject: [PATCH 090/386] Make ALL_IN_SCHEMA for tables affect views ALL TABLES is considered to include views, so we must check for reltypes 'r' and 'v', not just 'r'. This bug was introduced due to using a custom, backwards-compatible version of "ALL TABLES IN SCHEMA". --- database/postgresql/postgresql_privs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 10f2361bfb2..8fefd3de648 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -315,7 +315,7 @@ class Connection(object): query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" + WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] From e80073ff8761fd48a1bb4a5cf8dd6970bbcf5084 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Sun, 28 Jun 2015 13:45:48 +0300 Subject: [PATCH 091/386] stop reading from url on error --- windows/win_get_url.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index e5e1ea73c83..23463b681c0 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -62,7 +62,7 @@ Else { $stream = New-Object System.IO.StreamReader($response.GetResponseStream()) - $stream.ReadToEnd() | Set-Content -Path $dest -Force + $stream.ReadToEnd() | Set-Content -Path $dest -Force -ErrorAction Stop $result.changed = $true } From 1187399ffa7a5b4d99b5723fe6aeb44a2a1edbf3 Mon Sep 17 00:00:00 2001 From: Evan Kaufman Date: Sun, 28 Jun 2015 11:29:31 -0500 Subject: [PATCH 092/386] Add disabled option to cron module --- system/cron.py | 30 ++++++++++++++++++++++-------- 
1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/system/cron.py b/system/cron.py index 88985e23071..57455d6e19d 100644 --- a/system/cron.py +++ b/system/cron.py @@ -4,6 +4,7 @@ # (c) 2012, Dane Summers # (c) 2013, Mike Grozak # (c) 2013, Patrick Callahan +# (c) 2015, Evan Kaufman # # This file is part of Ansible # @@ -116,10 +117,16 @@ options: required: false default: null choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ] + disabled: + description: + - If the job should be disabled (commented out) in the crontab. Only has effect if state=present + version_added: "1.9" + required: false + default: false requirements: - cron author: "Dane Summers (@dsummersl)" -updates: [ 'Mike Grozak', 'Patrick Callahan' ] +updates: [ 'Mike Grozak', 'Patrick Callahan', 'Evan Kaufman' ] """ EXAMPLES = ''' @@ -290,17 +297,22 @@ class CronTab(object): return [] - def get_cron_job(self,minute,hour,day,month,weekday,job,special): + def get_cron_job(self,minute,hour,day,month,weekday,job,special,disabled): + if disabled: + disable_prefix = '#' + else: + disable_prefix = '' + if special: if self.cron_file: - return "@%s %s %s" % (special, self.user, job) + return "%s@%s %s %s" % (disable_prefix, special, self.user, job) else: - return "@%s %s" % (special, job) + return "%s@%s %s" % (disable_prefix, special, job) else: if self.cron_file: - return "%s %s %s %s %s %s %s" % (minute,hour,day,month,weekday,self.user,job) + return "%s%s %s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,self.user,job) else: - return "%s %s %s %s %s %s" % (minute,hour,day,month,weekday,job) + return "%s%s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,job) return None @@ -413,7 +425,8 @@ def main(): special_time=dict(required=False, default=None, choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"], - type='str') + type='str'), + disabled=dict(default=False, type='bool') ), supports_check_mode = False, ) @@ 
-431,6 +444,7 @@ def main(): weekday = module.params['weekday'] reboot = module.params['reboot'] special_time = module.params['special_time'] + disabled = module.params['disabled'] do_install = state == 'present' changed = False @@ -481,7 +495,7 @@ def main(): changed = crontab.remove_job_file() module.exit_json(changed=changed,cron_file=cron_file,state=state) - job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time) + job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled) old_job = crontab.find_job(name) if do_install: From 786ec1dca156323a2ce83ba3bbe507d85ba22840 Mon Sep 17 00:00:00 2001 From: whiter Date: Sat, 27 Jun 2015 21:54:19 +1000 Subject: [PATCH 093/386] ec2_vpc_net refactor --- cloud/amazon/ec2_vpc_net.py | 324 +++++++++++++++--------------------- 1 file changed, 137 insertions(+), 187 deletions(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 33c711e7683..41186ed0ab2 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -17,10 +17,11 @@ DOCUMENTATION = ''' --- module: ec2_vpc_net -short_description: configure AWS virtual private clouds +short_description: Configure AWS virtual private clouds description: - - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. + - Create or terminate AWS virtual private clouds. This module has a dependency on python-boto. version_added: "2.0" +author: Jonathan Davila (@defionscode) options: name: description: @@ -30,23 +31,25 @@ options: description: - The CIDR of the VPC required: yes - aliases: [] tenancy: description: - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. required: false default: default + choices: [ 'default', 'dedicated' ] dns_support: description: - Whether to enable AWS DNS support. 
required: false - default: true + default: yes + choices: [ 'yes', 'no' ] dns_hostnames: description: - Whether to enable AWS hostname support. required: false - default: true - dhcp_id: + default: yes + choices: [ 'yes', 'no' ] + dhcp_opts_id: description: - the id of the DHCP options to use for this vpc default: null @@ -61,30 +64,32 @@ options: - The state of the VPC. Either absent or present. default: present required: false + choices: [ 'present', 'absent' ] multi_ok: description: - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. default: false required: false -author: Jonathan Davila + extends_documentation_fragment: aws ''' EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + # Create a VPC with dedicate tenancy and a couple of tags -- ec2_vpc: +- ec2_vpc_net: name: Module_dev2 - cidr_block: 170.10.0.0/16 + cidr_block: 10.10.0.0/16 region: us-east-1 tags: - new_vpc: ec2_vpc_module - this: works22 + module: ec2_vpc_net + this: works tenancy: dedicated ''' - import time import sys @@ -92,8 +97,7 @@ try: import boto import boto.ec2 import boto.vpc - from boto.exception import EC2ResponseError - + from boto.exception import BotoServerError HAS_BOTO=True except ImportError: HAS_BOTO=False @@ -110,12 +114,11 @@ def boto_exception(err): return error def vpc_exists(module, vpc, name, cidr_block, multi): - """Returns True or False in regards to the existance of a VPC. When supplied + """Returns True or False in regards to the existence of a VPC. When supplied with a CIDR, it will check for matching tags to determine if it is a match otherwise it will assume the VPC does not exist and thus return false. 
""" - exists=False - matched_vpc=None + matched_vpc = None try: matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) @@ -123,114 +126,69 @@ def vpc_exists(module, vpc, name, cidr_block, multi): e_msg=boto_exception(e) module.fail_json(msg=e_msg) - if len(matching_vpcs) == 1 and not multi: - exists=True - matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0] - elif len(matching_vpcs) > 1 and not multi: - module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + if len(matching_vpcs) == 1: + matched_vpc = matching_vpcs[0] + elif len(matching_vpcs) > 1: + if multi: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' 'CIDR block you specified. If you would like to create ' - 'the VPC anyways please pass True to the multi_ok param.' % len(matching_vpcs)) - - return exists, matched_vpc - -def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags): - """This returns True or False. Intended to run after vpc_exists. - It will check all the characteristics of the parameters passed and compare them - to the active VPC. If any discrepancy is found, it will report true, meaning that - the VPC needs to be update in order to match the specified state in the params. 
- """ - - update_dhcp=False - update_tags=False - dhcp_match=False - - try: - dhcp_list=vpc.get_all_dhcp_options() - - if dhcp_id is not None: - has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) - for opts in dhcp_list: - if (str(opts).split(':')[1] == dhcp_id) or has_default: - dhcp_match=True - break - else: - pass - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - if not dhcp_match or (has_default and dhcp_id != 'default'): - update_dhcp=True - - if dns_hostnames and dns_support == False: - module.fail_json('In order to enable DNS Hostnames you must have DNS support enabled') - else: - - # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute - # which is needed in order to detect the current status of DNS options. For now we just update - # the attribute each time and is not used as a changed-factor. - try: - vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support) - vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - if tags: - try: - current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) - if not set(tags.items()).issubset(set(current_tags.items())): - update_tags=True - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - return update_dhcp, update_tags + 'the VPC anyway please pass True to the multi_ok param.' 
% len(matching_vpcs)) + + return matched_vpc -def update_vpc_tags(module, vpc, vpc_id, tags, name): +def update_vpc_tags(vpc, module, vpc_obj, tags, name): + + if tags is None: + tags = dict() + tags.update({'Name': name}) try: - vpc.create_tags(vpc_id, tags) - updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) + if sorted(current_tags) != sorted(tags): + vpc.create_tags(vpc_obj.id, tags) + return True + else: + return False except Exception, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) + - return updated_tags +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + + if vpc_obj.dhcp_options_id != dhcp_id: + connection.associate_dhcp_options(dhcp_id, vpc_obj.id) + return True + else: + return False +def get_vpc_values(vpc_obj): -def update_dhcp_opts(module, vpc, vpc_id, dhcp_id): - try: - vpc.associate_dhcp_options(dhcp_id, vpc_id) - dhcp_list=vpc.get_all_dhcp_options() - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - for opts in dhcp_list: - vpc_dhcp=vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}) - matched=False - if opts == dhcp_id: - matched=True - return opts - - if matched == False: - return dhcp_id + if vpc_obj is not None: + vpc_values = vpc_obj.__dict__ + if "region" in vpc_values: + vpc_values.pop("region") + if "item" in vpc_values: + vpc_values.pop("item") + if "connection" in vpc_values: + vpc_values.pop("connection") + return vpc_values + else: + return None def main(): argument_spec=ec2_argument_spec() argument_spec.update(dict( - name=dict(type='str', default=None, required=True), - cidr_block=dict(type='str', default=None, required=True), - tenancy=dict(choices=['default', 'dedicated'], default='default'), - dns_support=dict(type='bool', default=True), - dns_hostnames=dict(type='bool', default=True), - 
dhcp_opts_id=dict(type='str', default=None, required=False), - tags=dict(type='dict', required=False, default=None), - state=dict(choices=['present', 'absent'], default='present'), - region=dict(type='str', required=True), - multi_ok=dict(type='bool', default=False) + name = dict(type='str', default=None, required=True), + cidr_block = dict(type='str', default=None, required=True), + tenancy = dict(choices=['default', 'dedicated'], default='default'), + dns_support = dict(type='bool', default=True), + dns_hostnames = dict(type='bool', default=True), + dhcp_opts_id = dict(type='str', default=None, required=False), + tags = dict(type='dict', required=False, default=None), + state = dict(choices=['present', 'absent'], default='present'), + multi_ok = dict(type='bool', default=False) ) ) @@ -239,7 +197,7 @@ def main(): ) if not HAS_BOTO: - module.fail_json(msg='Boto is required for this module') + module.fail_json(msg='boto is required for this module') name=module.params.get('name') cidr_block=module.params.get('cidr_block') @@ -250,93 +208,85 @@ def main(): tags=module.params.get('tags') state=module.params.get('state') multi=module.params.get('multi_ok') - + changed=False - new_dhcp_opts=None - new_tags=None - update_dhcp=False - update_tags=False - - region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module) - - try: - vpc=boto.vpc.connect_to_region( - region, - **aws_connect_kwargs - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - already_exists, vpc_id=vpc_exists(module, vpc, name, cidr_block, multi) - - if already_exists: - update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) - if update_dhcp or update_tags: - changed=True + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: try: - e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) - dhcp_list=vpc.get_all_dhcp_options() - 
has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - dhcp_opts=None - - try: - for opts in dhcp_list: - if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}): - dhcp_opts=opts - break - else: - pass - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - if not dhcp_opts and has_default: - dhcp_opts='default' + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + if dns_hostnames and not dns_support: + module.fail_json('In order to enable DNS Hostnames you must also enable DNS support') if state == 'present': - - if not changed and already_exists: - module.exit_json(changed=changed, vpc_id=vpc_id) - elif changed: - if update_dhcp: - dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) - if update_tags: - e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) - - module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags) - - if not already_exists: + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is None: try: - vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1] - vpc.create_tags(vpc_id, dict(Name=name)) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) + vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if dhcp_id is not None: + try: + if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if tags is not None or name is not None: + try: + if update_vpc_tags(connection, module, vpc_obj, tags, name): + changed = True 
+ except BotoServerError, e: + module.fail_json(msg=e) + - update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. + try: + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support) + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames) + except BotoServerError, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + # get the vpc obj again in case it has changed + try: + vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] + except BotoServerError, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) - if update_dhcp: - new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) - if update_tags: - new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) - module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags) elif state == 'absent': - if already_exists: - changed=True + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is not None: try: - vpc.delete_vpc(vpc_id) - module.exit_json(changed=changed, vpc_id=vpc_id) - except Exception, e: - e_msg=boto_exception(e) + connection.delete_vpc(vpc_obj.id) + vpc_obj = None + changed = True + except BotoServerError, e: + e_msg = boto_exception(e) module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " - "and/or ec2_vpc_rt modules to ensure the other components are absent." % e_msg) - else: - module.exit_json(msg="VPC is absent") + "and/or ec2_vpc_route_table modules to ensure the other components are absent." 
% e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * From 7b0b75ceedf526826ebf591709afea4c8fdde7bb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 29 Jun 2015 10:34:24 -0400 Subject: [PATCH 094/386] Fix dict syntax typo --- cloud/openstack/os_client_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index a12cd8fe65a..2c4af5c8c08 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -52,9 +52,9 @@ EXAMPLES = ''' def main(): - module = AnsibleModule({ + module = AnsibleModule(argument_spec=dict( clouds=dict(required=False, default=[]), - }) + )) p = module.params try: From 73390f8ecc4df506a04a0406a42ecbea7d57501b Mon Sep 17 00:00:00 2001 From: James Meickle Date: Mon, 29 Jun 2015 13:23:03 -0400 Subject: [PATCH 095/386] Change uri debug example --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 3de17c12d60..8095eaffe67 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -269,7 +269,7 @@ def url_filename(url): def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): # To debug - #httplib2.debug = 4 + #httplib2.debuglevel = 4 # Handle Redirects if redirects == "all" or redirects == "yes": From 692045f693665f810736d0e07782e62bb4fb1f2d Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Mon, 29 Jun 2015 14:15:23 -0400 Subject: [PATCH 096/386] update docs for cloudformation --- cloud/amazon/cloudformation.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index dee292aeba3..cccdd156f20 100644 --- a/cloud/amazon/cloudformation.py +++ 
b/cloud/amazon/cloudformation.py @@ -51,6 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" + Must give full path to the file, relative to the playbook. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: [] @@ -115,6 +116,22 @@ EXAMPLES = ''' tags: Stack: "ansible-cloudformation" +# Basic role example +- name: launch ansible cloudformation example + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + # Removal example - name: tear down old deployment cloudformation: From e6fc129013b0dfd2873fad648a867cc87dc76cc6 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 14:49:13 -0400 Subject: [PATCH 097/386] Add a note about the return value. --- cloud/openstack/os_keypair.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index a9c2640628f..f485d7fd2fc 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,6 +33,10 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack +notes: + - The module returns a dictionary describing the keypair, with + keys including: id, name, public_key. A private_key entry may + also be included if a keypair was generated for you. 
options: name: description: From def5fdcb2123b8a0146fe8b94bf19f82db3248a5 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 29 Jun 2015 15:14:50 -0400 Subject: [PATCH 098/386] no_log to iam password --- cloud/amazon/iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index bda953faab4..df8f3423411 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -509,7 +509,7 @@ def main(): groups=dict(type='list', default=None, required=False), state=dict( default=None, required=True, choices=['present', 'absent', 'update']), - password=dict(default=None, required=False), + password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', From 5da9c6a1c77d40b4d52ac3ff9799e5bcb0ab3847 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 12:42:50 -0700 Subject: [PATCH 099/386] Add testing of docs to the core repo --- .travis.yml | 1 + test-docs.sh | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100755 test-docs.sh diff --git a/.travis.yml b/.travis.yml index 0e3a2af23b3..9a65ec487d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,3 +14,4 @@ script: - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . 
+ - ./test-docs.sh core diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +if [ $? -ne 0 ] ; then + exit $? +fi +make -C docsite From 7970924bd56e2bbd53f6588b023ca3497afc6ebb Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 15:55:15 -0400 Subject: [PATCH 100/386] Use newest documentation style for return value. --- cloud/openstack/os_keypair.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index f485d7fd2fc..7a0c1ca47a0 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,10 +33,6 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack -notes: - - The module returns a dictionary describing the keypair, with - keys including: id, name, public_key. A private_key entry may - also be included if a keypair was generated for you. options: name: description: @@ -78,6 +74,26 @@ EXAMPLES = ''' name: ansible_key ''' +RETURN = ''' +id: + description: Unique UUID. + returned: success + type: string +name: + description: Name given to the keypair. + returned: success + type: string +public_key: + description: The public key value for the keypair. + returned: success + type: string +private_key: + description: The private key value for the keypair. 
+ returned: Only when a keypair is generated for the user (e.g., when creating one + and a public key is not specified). + type: string +''' + def _system_state_change(module, keypair): state = module.params['state'] From 7edacf6b1c480099eabd6f9ad9ad21d056ac4053 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 13:20:15 -0700 Subject: [PATCH 101/386] Use module.fail_json() instead of sys.exit() --- cloud/amazon/iam_policy.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..26d65450ec9 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -112,13 +112,12 @@ task: ''' import json import urllib -import sys try: import boto import boto.iam + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False def boto_exception(err): '''generic error message handler''' @@ -278,6 +277,9 @@ def main(): argument_spec=argument_spec, ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + state = module.params.get('state').lower() iam_type = module.params.get('iam_type').lower() state = module.params.get('state') From ddc78c82a4db6e8ee8c377fc08178e16fafdbbf0 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Mon, 29 Jun 2015 14:06:50 -0700 Subject: [PATCH 102/386] Document auto_floating_ip argument --- cloud/openstack/os_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 78a46f78c04..959f39880f8 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -90,6 +90,11 @@ options: - Ensure instance has public ip however the cloud wants to do that required: false default: 'yes' + auto_floating_ip: + description: + - If the module should automatically assign a floating IP + required: false + default: 'yes' floating_ips: description: - list of valid floating IPs 
that pre-exist to assign to this node From bb816f046c5acb5618cfb135f82cf2e8090d832d Mon Sep 17 00:00:00 2001 From: Austin Brown Date: Mon, 29 Jun 2015 23:13:29 -0700 Subject: [PATCH 103/386] Adding version detection --- packaging/language/gem.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/language/gem.py b/packaging/language/gem.py index d3963c816c5..491402d115f 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -84,7 +84,7 @@ options: - Allow adding build flags for gem compilation required: false version_added: "2.0" -author: +author: - "Ansible Core Team" - "Johan Wiren" ''' @@ -196,7 +196,11 @@ def install(module): if module.params['pre_release']: cmd.append('--pre') if not module.params['include_doc']: - cmd.append('--no-document') + if major and major < 2: + cmd.append('--no-rdoc') + cmd.append('--no-ri') + else: + cmd.append('--no-document') cmd.append(module.params['gem_source']) if module.params['build_flags']: cmd.extend([ '--', module.params['build_flags'] ]) From 4da3a724f1d57f5e1fe7f29804d82d835cceb3a5 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Sun, 21 Jun 2015 23:51:14 +0200 Subject: [PATCH 104/386] Fix connection creation to allow usage of profiles with boto --- cloud/amazon/iam_policy.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 26d65450ec9..72e70221d29 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -115,6 +115,7 @@ import urllib try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -293,13 +294,10 @@ def main(): else: pdoc=None - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - ) + iam = 
boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 02ea210db9f60ab68b5ae4e18f7150f3e5993954 Mon Sep 17 00:00:00 2001 From: Andreas Reischuck Date: Sat, 27 Jun 2015 23:34:16 +0200 Subject: [PATCH 105/386] fixed win_file state=touch --- windows/win_file.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index 0f3c20ec8e3..f8416120abf 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -56,7 +56,7 @@ If ( $state -eq "touch" ) } Else { - echo $null > $file + echo $null > $path } $result.changed = $TRUE } From 4ef5a45347558349f0fa23e138bf18559dd9a672 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 05:08:38 -0700 Subject: [PATCH 106/386] Add version that the profilename param was added --- packaging/os/rhn_register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 4207acc8c28..b67b442aa22 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -61,6 +61,7 @@ options: - supply an profilename for use with registration required: False default: null + version_added: "2.0" channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. 
From 3ccf2e8a86ead4df8f0d8c328fac941465838d91 Mon Sep 17 00:00:00 2001 From: Ash Caire Date: Tue, 30 Jun 2015 23:04:15 +1000 Subject: [PATCH 107/386] add headers param to s3 uploads --- cloud/amazon/s3.py | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 095befe173a..139c009088c 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -56,6 +56,11 @@ options: required: false default: 600 aliases: [] + headers: + description: + - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + required: false + default: null marker: description: - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. @@ -128,7 +133,7 @@ options: version_added: "1.3" requirements: [ "boto" ] -author: +author: - "Lester Wade (@lwade)" - "Ralph Tice (@ralph-tice)" extends_documentation_fragment: aws @@ -147,6 +152,9 @@ EXAMPLES = ''' # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' +# PUT/upload with custom headers +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com + # List keys simple - s3: bucket=mybucket mode=list @@ -162,7 +170,7 @@ EXAMPLES = ''' # Delete a bucket and all contents - s3: bucket=mybucket mode=delete -# GET an object but dont download if the file checksums match +# GET an object but dont download if the file checksums match - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different # Delete an object from a bucket @@ -285,7 +293,7 @@ def path_check(path): return False -def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt): +def upload_s3file(module, s3, bucket, 
obj, src, expiry, metadata, encrypt, headers): try: bucket = s3.lookup(bucket) key = bucket.new_key(obj) @@ -293,7 +301,7 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt): for meta_key in metadata.keys(): key.set_metadata(meta_key, metadata[meta_key]) - key.set_contents_from_filename(src, encrypt_key=encrypt) + key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: @@ -367,6 +375,7 @@ def main(): dest = dict(default=None), encrypt = dict(default=True, type='bool'), expiry = dict(default=600, aliases=['expiration']), + headers = dict(type='dict'), marker = dict(default=None), max_keys = dict(default=1000), metadata = dict(type='dict'), @@ -390,6 +399,7 @@ def main(): expiry = int(module.params['expiry']) if module.params.get('dest'): dest = os.path.expanduser(module.params.get('dest')) + headers = module.params.get('headers') marker = module.params.get('marker') max_keys = module.params.get('max_keys') metadata = module.params.get('metadata') @@ -402,16 +412,16 @@ def main(): s3_url = module.params.get('s3_url') src = module.params.get('src') - if overwrite not in ['always', 'never', 'different']: - if module.boolean(overwrite): - overwrite = 'always' - else: + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: overwrite='never' - if overwrite not in ['always', 'never', 'different']: - if module.boolean(overwrite): - overwrite = 'always' - else: + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: overwrite='never' region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -533,24 +543,24 @@ def main(): if md5_local == md5_remote: sum_matches = True if overwrite == 'always': - upload_s3file(module, s3, bucket, obj, src, expiry, 
metadata, encrypt) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) else: get_download_url(module, s3, bucket, obj, expiry, changed=False) else: sum_matches = False if overwrite in ('always', 'different'): - upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.") # If neither exist (based on bucket existence), we can create both. if bucketrtn is False and pathrtn is True: create_bucket(module, s3, bucket, location) - upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) # If bucket exists but key doesn't, just upload. if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers) # Delete an object from a bucket, not the entire bucket if mode == 'delobj': From 8deee99fcc72852e7275746c2793976790881d50 Mon Sep 17 00:00:00 2001 From: verm666 Date: Tue, 30 Jun 2015 08:14:30 -0700 Subject: [PATCH 108/386] unarchive: fix @bcoca's remarks, issue #1575 --- files/unarchive.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index a3544253402..3ee83de0dcd 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -303,10 +303,7 @@ def main(): # skip working with 0 size archives try: if os.path.getsize(src) == 0: - res_args = { - 'changed': False - } - module.exit_json(**res_args) + module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src) except Exception, e: module.fail_json(msg="Source '%s' not readable" % src) From edad5c80ffc49706d44c98ee449c436b352a8817 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 
2015 13:18:56 -0700 Subject: [PATCH 109/386] Few minor things from review of the pull request --- cloud/openstack/os_keypair.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 7a0c1ca47a0..73656883a76 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -56,7 +56,7 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present -requirements: ["shade"] +requirements: [] ''' EXAMPLES = ''' @@ -163,4 +163,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From 02b6df3160e66f92ef0e0cea363bce9472ce94b5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 05:00:08 -0700 Subject: [PATCH 110/386] Fix indentation levels in os_keypair --- cloud/openstack/os_keypair.py | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 73656883a76..f62cc51bf64 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -32,30 +32,30 @@ short_description: Add/Delete a keypair from OpenStack extends_documentation_fragment: openstack version_added: "2.0" description: - - Add or Remove key pair from OpenStack + - Add or Remove key pair from OpenStack options: - name: - description: - - Name that has to be given to the key pair - required: true - default: None - public_key: - description: - - The public key that would be uploaded to nova and injected into VMs - upon creation. - required: false - default: None - public_key_file: - description: - - Path to local file containing ssh public key. Mutually exclusive - with public_key. 
+ name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected into VMs + upon creation. required: false default: None - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive + with public_key. + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present requirements: [] ''' From b00b3f2b3c57cae8131fb15abbd0ddb0f3515cfb Mon Sep 17 00:00:00 2001 From: verm666 Date: Wed, 1 Jul 2015 07:04:45 -0700 Subject: [PATCH 111/386] fix authorized_keys in check_mode This change is in response to issue #1515. Original pull request #1580. The original problem is: in authorized_key module you have no idea about users which will be created by Ansible at first run. I can propose next two ways to solve this problem: 1. Combine modules system/user.py and system/authorized_key.py in one module (so you will know everything about users in that module) 2. Use small workaround: add my commit and always provide 'path' parameter for authorized_key module during runs with --check option. 
--- system/authorized_key.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index bb223acbe4d..e52b4e7556a 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -138,7 +138,7 @@ import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ - + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): @@ -146,7 +146,7 @@ class keydict(dict): self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) + super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): @@ -154,7 +154,7 @@ class keydict(dict): def values(self): return [self[key] for key in self] def itervalues(self): - return (self[key] for key in self) + return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ @@ -168,6 +168,13 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): :return: full path string to authorized_keys for user """ + if module.check_mode: + if path is None: + module.fail_json(msg="You must provide full path to key file in check mode") + else: + keysfile = path + return keysfile + try: user_entry = pwd.getpwnam(user) except KeyError, e: @@ -214,8 +221,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): return keysfile def parseoptions(module, options): - ''' - reads a string containing ssh-key options + ''' + reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict @@ -246,7 +253,7 @@ def parsekey(module, raw_key): 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', + 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] From 910728f6c3b49de97df9af2abc730ff589230754 Mon 
Sep 17 00:00:00 2001 From: Matthew Gilliard Date: Wed, 1 Jul 2015 12:07:27 +0100 Subject: [PATCH 112/386] Handle race condition in directory creation. If we try to make a directory, but someone else creates the directory at the same time as us, we don't need to raise that error to the user. They asked for the directory to exist, and now it does. This fixes the race condition which was causing that error to be raised, and closes #1648. --- files/file.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 55d3665028e..ba5afd6809f 100644 --- a/files/file.py +++ b/files/file.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import errno import shutil import stat import grp @@ -280,7 +281,13 @@ def main(): if not os.path.isabs(path): curpath = curpath.lstrip('/') if not os.path.exists(curpath): - os.mkdir(curpath) + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed) From 1b21e37fcbc135608b602bcc011bbcaeabd59ca3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 07:24:39 -0700 Subject: [PATCH 113/386] Disable travis docs checks --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9a65ec487d3..91d1b9585d7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,4 +14,4 @@ script: - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . 
- - ./test-docs.sh core + #- ./test-docs.sh core From 5a254e6303b82f8fe73e6ab7b1579ac0c8e36e14 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:22:50 -0500 Subject: [PATCH 114/386] Replace tabbed indentation with spaces for mysql_db module --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index e9a530811d4..c018ad143db 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -326,7 +326,7 @@ def main(): if state in ['dump','import']: if target is None: module.fail_json(msg="with state=%s target is required" % (state)) - if db == 'all': + if db == 'all': connect_to_db = 'mysql' db = 'mysql' all_databases = True From 9eb4219f79446c2302e346f6e4464ea2ead8626e Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:28 -0500 Subject: [PATCH 115/386] Replaced tabbed indentation with spaces for apt module --- packaging/os/apt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 09129a73fa5..9172c69763d 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -230,10 +230,10 @@ def package_status(m, pkgname, version, cache, state): try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: - is_installed = False + is_installed = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: - package = provided_packages[0] + package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True From 4f43c4c09cf717b2cb0b59041f3e2da21cedf1a9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:51 -0500 Subject: [PATCH 116/386] Replaced tabbed indentation with spaces for subversion module --- source_control/subversion.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index e3ff6dbfba5..cae4702e174 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -121,7 +121,7 @@ class Subversion(object): def checkout(self): '''Creates new svn working directory if it does not already exist.''' self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - + def export(self, force=False): '''Export svn repo to directory''' cmd = ["export"] From b6b576abf6c2e73c8fd4a5308c0cfff00f6d300d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:01 -0500 Subject: [PATCH 117/386] Replaced tabbed indentation with spaces for group module --- system/group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/group.py b/system/group.py index d952cb5c28c..53ab5f904dc 100644 --- a/system/group.py +++ b/system/group.py @@ -121,7 +121,7 @@ class Group(object): if len(cmd) == 1: return (None, '', '') if self.module.check_mode: - return (0, '', '') + return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) From c2d0fbd45ba882c8a211f645e22e029d8c0b8b2a Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:23 -0500 Subject: [PATCH 118/386] Remove unnecessary imports in a docs only file for win_copy --- windows/win_copy.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index efdebc5a4a6..acc6c9ef2e0 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -18,8 +18,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import os -import time DOCUMENTATION = ''' --- From ed179fe379da90dafd8e89be94402511a899e49a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Jul 2015 10:39:50 -0400 Subject: [PATCH 119/386] now captures any exceptions when trying to create directories --- files/file.py | 44 ++++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/files/file.py b/files/file.py index ba5afd6809f..c3267f7f18b 100644 --- a/files/file.py +++ b/files/file.py @@ -271,26 +271,30 @@ def main(): module.exit_json(changed=True) changed = True curpath = '' - # Split the path so we can apply filesystem attributes recursively - # from the root (/) directory for absolute paths or the base path - # of a relative path. We can then walk the appropriate directory - # path to apply attributes. - for dirname in path.strip('/').split('/'): - curpath = '/'.join([curpath, dirname]) - # Remove leading slash if we're creating a relative path - if not os.path.isabs(path): - curpath = curpath.lstrip('/') - if not os.path.exists(curpath): - try: - os.mkdir(curpath) - except OSError, ex: - # Possibly something else created the dir since the os.path.exists - # check above. As long as it's a dir, we don't need to error out. - if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): - raise - tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + + try: + # Split the path so we can apply filesystem attributes recursively + # from the root (/) directory for absolute paths or the base path + # of a relative path. We can then walk the appropriate directory + # path to apply attributes. 
+ for dirname in path.strip('/').split('/'): + curpath = '/'.join([curpath, dirname]) + # Remove leading slash if we're creating a relative path + if not os.path.isabs(path): + curpath = curpath.lstrip('/') + if not os.path.exists(curpath): + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + raise + tmp_file_args = file_args.copy() + tmp_file_args['path']=curpath + changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + except Exception, e: + module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e))) # We already know prev_state is not 'absent', therefore it exists in some form. elif prev_state != 'directory': From 5c17a99a1cbb31d1b834f2f623e87d851ab2a140 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Wed, 1 Jul 2015 20:58:17 -0500 Subject: [PATCH 120/386] Upstream docs show launch_config_name as required. http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_AutoScalingGroup.html Fixes #11209 Ansible behavior is correct, this commit just updates the docs to reflect that correctness. --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 54d051375e6..eaeb141825e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -43,7 +43,7 @@ options: launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. 
- required: false + required: true min_size: description: - Minimum number of instances in group From ee782d822b6df37fbd19fd60ec1fc02c0de08265 Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 2 Jul 2015 15:24:39 +1000 Subject: [PATCH 121/386] Fixed dicts comparison for tags --- cloud/amazon/ec2_vpc_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 41186ed0ab2..ebdd4ed6504 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -145,7 +145,7 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): tags.update({'Name': name}) try: current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) - if sorted(current_tags) != sorted(tags): + if cmp(tags, current_tags): vpc.create_tags(vpc_obj.id, tags) return True else: From 3d9a8caa5942b0efc8012b1b8080b5ce04bf652d Mon Sep 17 00:00:00 2001 From: Flyte Date: Wed, 1 Jul 2015 09:32:58 +0100 Subject: [PATCH 122/386] Provide correct kwargs to rds2 connection when making a final snapshot --- cloud/amazon/rds.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 71ead8ad10b..3d6f192b9ab 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -715,7 +715,10 @@ def delete_db_instance_or_snapshot(module, conn): if instance_name: if snapshot: params["skip_final_snapshot"] = False - params["final_snapshot_id"] = snapshot + if has_rds2: + params["final_db_snapshot_identifier"] = snapshot + else: + params["final_snapshot_id"] = snapshot else: params["skip_final_snapshot"] = True result = conn.delete_db_instance(instance_name, **params) From 8f0d462fd0e966fbb04e4fbcf4685a2fd600fee0 Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Thu, 2 Jul 2015 06:16:43 -0700 Subject: [PATCH 123/386] remove double dict & fix increment bug --- cloud/amazon/ec2_elb_lb.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) 
diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 9d626a98194..04be9e2813c 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -393,11 +393,11 @@ class ElbManager(object): # status of instances behind the ELB if info['instances']: - info['instance_health'] = [ dict({ - "instance_id": instance_state.instance_id, - "reason_code": instance_state.reason_code, - "state": instance_state.state, - }) for instance_state in self.elb_conn.describe_instance_health(self.name)] + info['instance_health'] = [ dict( + instance_id = instance_state.instance_id, + reason_code = instance_state.reason_code, + state = instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] else: info['instance_health'] = [] @@ -409,7 +409,7 @@ class ElbManager(object): elif instance_state['state'] == "OutOfService": info['out_of_service_count'] += 1 else: - info['unknown_instance_state_count'] =+ 1 + info['unknown_instance_state_count'] += 1 if check_elb.health_check: info['health_check'] = { From 93754b903f6956a86891197debb83f801b809200 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 14:43:21 -0400 Subject: [PATCH 124/386] updated upgrade to a more sensible default as the previous was prone to confusion fixes #1667 --- packaging/os/apt.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 9172c69763d..19a7c426f5e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -80,8 +80,8 @@ options: - 'Note: This does not upgrade a specific package, use state=latest for that.' version_added: "1.1" required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] + default: "no" + choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. 
Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -548,7 +548,7 @@ def main(): default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), + upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], @@ -572,6 +572,10 @@ def main(): APT_GET_CMD = module.get_bin_path("apt-get") p = module.params + + if p['upgrade'] == 'no': + p['upgrade'] = None + if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") From 6c0e26480629ab57c3be9fbc57ec38796831e341 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Fri, 3 Jul 2015 22:57:53 -0500 Subject: [PATCH 125/386] Add missing GPLv3 License header Fixes #1643 --- cloud/amazon/ec2_eip.py | 15 +++++++++++++++ cloud/amazon/ec2_group.py | 15 ++++++++++++++- cloud/amazon/ec2_key.py | 15 ++++++++++++++- cloud/amazon/ec2_scaling_policy.py | 14 ++++++++++++++ cloud/openstack/keystone_user.py | 14 ++++++++++++++ cloud/vmware/vsphere_guest.py | 14 ++++++++++++++ commands/raw.py | 15 +++++++++++++++ commands/script.py | 14 ++++++++++++++ commands/shell.py | 15 +++++++++++++++ files/fetch.py | 15 +++++++++++++++ files/template.py | 15 +++++++++++++++ inventory/add_host.py | 15 +++++++++++++++ inventory/group_by.py | 15 +++++++++++++++ utilities/logic/pause.py | 15 +++++++++++++++ windows/win_template.py | 15 +++++++++++++++ 15 files changed, 219 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index c3b764b2e63..6a937ed2d06 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -1,4 +1,19 @@ #!/usr/bin/python +# This file is part of Ansible +# +# Ansible 
is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: ec2_eip diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index bde2f5cc19e..747fb839566 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -1,6 +1,19 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
DOCUMENTATION = ''' --- diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index a9217bd69db..98ea28d0dce 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -1,6 +1,19 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . DOCUMENTATION = ''' --- diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 10f03e9fc46..2856644ee9c 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
DOCUMENTATION = """ module: ec2_scaling_policy diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index de5eed598c7..a3529c290b3 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -1,5 +1,19 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . # Based on Jimmy Tang's implementation diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index f0239544cec..3cd4f51523c 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1,6 +1,20 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
# TODO: # Ability to set CPU/Memory reservations diff --git a/commands/raw.py b/commands/raw.py index 5305c978630..a03ab828bb5 100644 --- a/commands/raw.py +++ b/commands/raw.py @@ -1,5 +1,20 @@ # this is a virtual module that is entirely implemented server side +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: raw diff --git a/commands/script.py b/commands/script.py index ccf15331a6c..9fed7928ce0 100644 --- a/commands/script.py +++ b/commands/script.py @@ -1,3 +1,17 @@ +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . DOCUMENTATION = """ --- diff --git a/commands/shell.py b/commands/shell.py index cccc90f05ff..23d4962e55f 100644 --- a/commands/shell.py +++ b/commands/shell.py @@ -2,6 +2,21 @@ # it runs the 'command' module with special arguments and it behaves differently. 
# See the command source and the comment "#USE_SHELL". +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: shell diff --git a/files/fetch.py b/files/fetch.py index b8234374976..d0b1371c306 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -1,5 +1,20 @@ # this is a virtual module that is entirely implemented server side +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ DOCUMENTATION = ''' --- module: fetch diff --git a/files/template.py b/files/template.py index a1dc72c27bd..a419f0d2088 100644 --- a/files/template.py +++ b/files/template.py @@ -1,5 +1,20 @@ # this is a virtual module that is entirely implemented server side +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: template diff --git a/inventory/add_host.py b/inventory/add_host.py index 2ab76b4c16a..ef01ed1051b 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -1,5 +1,20 @@ # -*- mode: python -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ DOCUMENTATION = ''' --- module: add_host diff --git a/inventory/group_by.py b/inventory/group_by.py index f63bdf5912b..4bfd20206be 100644 --- a/inventory/group_by.py +++ b/inventory/group_by.py @@ -1,5 +1,20 @@ # -*- mode: python -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: group_by diff --git a/utilities/logic/pause.py b/utilities/logic/pause.py index f1d10bf017f..0fad09ea7bc 100644 --- a/utilities/logic/pause.py +++ b/utilities/logic/pause.py @@ -1,5 +1,20 @@ # -*- mode: python -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ DOCUMENTATION = ''' --- module: pause diff --git a/windows/win_template.py b/windows/win_template.py index c384ad7775f..e8323362dd6 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -1,5 +1,20 @@ # this is a virtual module that is entirely implemented server side +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: win_template From c3c2e6ab726f9ea28a7a5d37b2a466740843bb9a Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Sat, 4 Jul 2015 14:45:21 -0400 Subject: [PATCH 126/386] Update cloudformation.py Fix for inaccurate phrasing --- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index cccdd156f20..abde0ec375c 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -51,7 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" - Must give full path to the file, relative to the playbook. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" + Must give full path to the file, relative to the working directory. 
If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: [] From ae1126c217bc48847e35311785ede1011d5e544f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Mon, 6 Oct 2014 23:32:53 -0400 Subject: [PATCH 127/386] Add recursive support for ACL module (fix #5053, fix #5550, fix #7276) --- files/acl.py | 229 ++++++++++++++++++++++++++------------------------- 1 file changed, 117 insertions(+), 112 deletions(-) diff --git a/files/acl.py b/files/acl.py index 0c924fee94c..808e854ad84 100644 --- a/files/acl.py +++ b/files/acl.py @@ -79,6 +79,14 @@ options: description: - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. + recursive: + version_added: "@@@" + required: false + default: no + choices: [ 'yes', 'no' ] + description: + - Recursively sets the specified ACL (added in Ansible @@@). Incompatible with C(state=query). + author: "Brian Coca (@bcoca)" notes: - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. @@ -110,20 +118,6 @@ acl: sample: [ "user::rwx", "group::rwx", "other::rwx" ] ''' -def normalize_permissions(p): - perms = ['-','-','-'] - for char in p: - if char == 'r': - perms[0] = 'r' - if char == 'w': - perms[1] = 'w' - if char == 'x': - perms[2] = 'x' - if char == 'X': - if perms[2] != 'x': # 'x' is more permissive - perms[2] = 'X' - return ''.join(perms) - def split_entry(entry): ''' splits entry and ensures normalized return''' @@ -132,9 +126,9 @@ def split_entry(entry): if len(a) == 3: a.append(False) try: - p,e,t,d = a + p, e, t, d = a except ValueError, e: - print "wtf?? %s => %s" % (entry,a) + print "wtf?? 
%s => %s" % (entry, a) raise e if d: @@ -151,69 +145,87 @@ def split_entry(entry): else: t = None - p = normalize_permissions(p) + return [d, t, e, p] - return [d,t,e,p] -def get_acls(module,path,follow): +def build_entry(etype, entity, permissions=None): + '''Builds and returns an entry string. Does not include the permissions bit if they are not provided.''' + return etype + ':' + entity + (':' + permissions if permissions else '') + + +def build_command(module, mode, path, follow, default, recursive, entry=''): + '''Builds and returns agetfacl/setfacl command.''' + if mode == 'set': + cmd = [module.get_bin_path('setfacl', True)] + cmd.append('-m "%s"' % entry) + elif mode == 'rm': + cmd = [module.get_bin_path('setfacl', True)] + cmd.append('-x "%s"' % entry) + else: # mode == 'get' + cmd = [module.get_bin_path('getfacl', True)] + cmd.append('--omit-header') + cmd.append('--absolute-names') + + if recursive: + cmd.append('--recursive') - cmd = [ module.get_bin_path('getfacl', True) ] if not follow: cmd.append('-h') - # prevents absolute path warnings and removes headers - cmd.append('--omit-header') - cmd.append('--absolute-names') - cmd.append(path) - return _run_acl(module,cmd) - -def set_acl(module,path,entry,follow,default): - - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - cmd.append('-h') if default: - cmd.append('-d') - cmd.append('-m "%s"' % entry) + if(mode == 'rm'): + cmd.append('-k') + else: # mode == 'set' or mode == 'get' + cmd.append('-d') + cmd.append(path) + return cmd - return _run_acl(module,cmd) -def rm_acl(module,path,entry,follow,default): +def acl_changed(module, cmd): + '''Returns true if the provided command affects the existing ACLs, false otherwise.''' + cmd = cmd[:] # lists are mutables so cmd would be overriden without this + cmd.insert(1, '--test') + lines = run_acl(module, cmd) + return not all(line.endswith('*,*') for line in lines) - cmd = [ module.get_bin_path('setfacl', True) ] - if not follow: - 
cmd.append('-h') - if default: - cmd.append('-k') - entry = entry[0:entry.rfind(':')] - cmd.append('-x "%s"' % entry) - cmd.append(path) - return _run_acl(module,cmd,False) - -def _run_acl(module,cmd,check_rc=True): +def run_acl(module, cmd, check_rc=True): try: (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) except Exception, e: module.fail_json(msg=e.strerror) - # trim last line as it is always empty - ret = out.splitlines() - return ret[0:len(ret)-1] + lines = out.splitlines() + if lines and not lines[-1].split(): + # trim last line only when it is empty + return lines[:-1] + else: + return lines + def main(): module = AnsibleModule( - argument_spec = dict( - name = dict(required=True,aliases=['path'], type='str'), - entry = dict(required=False, etype='str'), - entity = dict(required=False, type='str', default=''), - etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'), - permissions = dict(required=False, type='str'), - state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'), - follow = dict(required=False, type='bool', default=True), - default= dict(required=False, type='bool', default=False), + argument_spec=dict( + name=dict(required=True, aliases=['path'], type='str'), + entry=dict(required=False, type='str'), + entity=dict(required=False, type='str', default=''), + etype=dict( + required=False, + choices=['other', 'user', 'group', 'mask'], + type='str' + ), + permissions=dict(required=False, type='str'), + state=dict( + required=False, + default='query', + choices=['query', 'present', 'absent'], + type='str' + ), + follow=dict(required=False, type='bool', default=True), + default=dict(required=False, type='bool', default=False), + recursive=dict(required=False, type='bool', default=False), ), supports_check_mode=True, ) @@ -226,79 +238,72 @@ def main(): state = module.params.get('state') follow = module.params.get('follow') default = 
module.params.get('default') - - if permissions: - permissions = normalize_permissions(permissions) + recursive = module.params.get('recursive') if not os.path.exists(path): - module.fail_json(msg="path not found or not accessible!") + module.fail_json(msg="Path not found or not accessible.") - if state in ['present','absent']: - if not entry and not etype: - module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state) + if state == 'query' and recursive: + module.fail_json(msg="'recursive' MUST NOT be set when 'state=query'.") + + if not entry: + if state == 'absent' and permissions: + module.fail_json(msg="'permissions' MUST NOT be set when 'state=absent'.") + + if state == 'absent' and not entity: + module.fail_json(msg="'entity' MUST be set when 'state=absent'.") + + if state in ['present', 'absent'] and not etype: + module.fail_json(msg="'etype' MUST be set when 'state=%s'." % state) if entry: if etype or entity or permissions: - module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set") - if entry.count(":") not in [2,3]: - module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry) + module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.") + + if state == 'present' and entry.count(":") != 3: + module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.") + + if state == 'absent' and entry.count(":") != 2: + module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") default, etype, entity, permissions = split_entry(entry) - changed=False + changed = False msg = "" - currentacls = get_acls(module,path,follow) - if (state == 'present'): - matched = False - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = split_entry(oldentry) - if old_default == default: - if old_type == 
etype: - if etype in ['user', 'group']: - if old_entity == entity: - matched = True - if not old_permissions == permissions: - changed = True - break - else: - matched = True - if not old_permissions == permissions: - changed = True - break - if not matched: - changed=True + if state == 'present': + entry = build_entry(etype, entity, permissions) + command = build_command( + module, 'set', path, follow, + default, recursive, entry + ) + changed = acl_changed(module, command) if changed and not module.check_mode: - set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default) - msg="%s is present" % ':'.join([etype, str(entity), permissions]) + run_acl(module, command) + msg = "%s is present" % entry elif state == 'absent': - for oldentry in currentacls: - if oldentry.count(":") == 0: - continue - old_default, old_type, old_entity, old_permissions = split_entry(oldentry) - if old_default == default: - if old_type == etype: - if etype in ['user', 'group']: - if old_entity == entity: - changed=True - break - else: - changed=True - break + entry = build_entry(etype, entity) + command = build_command( + module, 'rm', path, follow, + default, recursive, entry + ) + changed = acl_changed(module, command) + if changed and not module.check_mode: - rm_acl(module,path,':'.join([etype, entity, '---']),follow,default) - msg="%s is absent" % ':'.join([etype, entity, '---']) + run_acl(module, command, False) + msg = "%s is absent" % entry + else: - msg="current acl" + msg = "current acl" - if changed: - currentacls = get_acls(module,path,follow) + acl = run_acl( + module, + build_command(module, 'get', path, follow, default, recursive) + ) - module.exit_json(changed=changed, msg=msg, acl=currentacls) + module.exit_json(changed=changed, msg=msg, acl=acl) # import module snippets from ansible.module_utils.basic import * From c786202ee4b6a89cc509348006bfcbeab90a9819 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Mon, 6 Jul 2015 11:22:23 +0300 Subject: [PATCH 
128/386] use ConvertTo-Bool for the force parameter --- windows/win_get_url.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 23463b681c0..02f19b39360 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,9 +40,9 @@ Else { Fail-Json $result "missing required argument: dest" } -$force = Get-Attr -obj $params -name "force" "no" +$force = Get-Attr -obj $params -name "force" "no" | ConvertTo-Bool -If ($force -eq "yes" -or -not (Test-Path $dest)) { +If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient Try { From 110f618487de51ba41cf0ce94f2d5574c6f54d09 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 129/386] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index b1c8591b25c..30557a2212a 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -35,6 +35,12 @@ options: required: true default: null aliases: [] + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone to modify + required: false + default: null + aliases: [] record: description: - The full DNS record to create or delete @@ -195,6 +201,17 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hostes_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + # Use a routing policy to distribute traffic: - route53: command: "create" @@ -252,6 +269,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), + hosted_zone_id = dict(required=False), record = dict(required=True), ttl = dict(required=False, type='int', default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -275,6 +293,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() + hosted_zone_id_in = module.params.get('hosted_zone_id') ttl_in = module.params.get('ttl') record_in = module.params.get('record').lower() type_in = module.params.get('type') @@ -360,6 +379,8 @@ def main(): record['region'] = rset.region record['failover'] = rset.failover record['health_check'] = rset.health_check + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name From 71ebe6321b241501e40f0908ce84daf7e918ac8d Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 130/386] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 30557a2212a..8dd781ffdf2 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -212,6 +212,17 @@ EXAMPLES = ''' ttl: "7200" value: "::1" +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hostes_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + # Use a routing policy to distribute traffic: - route53: command: "create" @@ -374,6 +385,8 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in record['identifier'] = rset.identifier record['weight'] = rset.weight record['region'] = rset.region From f0ad6c5a1fd3f93d776097619231d1cd4860e520 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:02:24 +0300 Subject: [PATCH 131/386] Fix hosted_zone_id after rebase. --- cloud/amazon/route53.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 8dd781ffdf2..a981c6ef2be 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -250,13 +250,13 @@ try: except ImportError: HAS_BOTO = False -def get_zone_by_name(conn, module, zone_name, want_private): - """Finds a zone by name""" +def get_zone_by_name(conn, module, zone_name, want_private, zone_id): + """Finds a zone by name or zone_id""" for zone in conn.get_zones(): # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params private_zone = module.boolean(zone.config.get('PrivateZone', False)) - if private_zone == want_private and zone.name == zone_name: + if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id): return zone return None @@ -280,7 +280,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), - hosted_zone_id = 
dict(required=False), + hosted_zone_id = dict(required=False, default=None), record = dict(required=True), ttl = dict(required=False, type='int', default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -351,7 +351,7 @@ def main(): module.fail_json(msg = e.error_message) # Find the named zone ID - zone = get_zone_by_name(conn, module, zone_in, private_zone_in) + zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in) # Verify that the requested zone is already defined in Route53 if zone is None: From 228c03bd670449813c3d3d45fa4a7767ad924774 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:07:33 +0300 Subject: [PATCH 132/386] Add version number --- cloud/amazon/route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index a981c6ef2be..e81e5083763 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -39,6 +39,7 @@ options: description: - The Hosted Zone ID of the DNS zone to modify required: false + version_added: 2.0 default: null aliases: [] record: From 041dc8b5877eabd130c79618dedcebb40d3c138b Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:08:46 +0300 Subject: [PATCH 133/386] Remove empty aliases --- cloud/amazon/route53.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index e81e5083763..f9702cc38ae 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -41,7 +41,6 @@ options: required: false version_added: 2.0 default: null - aliases: [] record: description: - The full DNS record to create or delete From 08b2f3191537b95b6056067aad0416ea0b881b82 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 134/386] Add OpenStack Security Group Rule module --- cloud/openstack/os_security_group_rule.py | 154 ++++++++++++++++++++++ 1 file changed, 154 
insertions(+) create mode 100644 cloud/openstack/os_security_group_rule.py diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py new file mode 100644 index 00000000000..d539cf91ee5 --- /dev/null +++ b/cloud/openstack/os_security_group_rule.py @@ -0,0 +1,154 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade +except ImportError: + print("failed=True msg='shade is required for this module'") + + +DOCUMENTATION = ''' +--- +module: os_security_group_rule +short_description: Add/Delete rule from an existing security group +extends_documentation_fragment: openstack +version_added: "1.10" +description: + - Add or Remove rule from an existing security group +options: + security_group: + description: + - Name of the security group + required: true + protocol: + description: + - IP protocol + choices: ['tcp', 'udp', 'icmp'] + default: tcp + port_range_min: + description: + - Starting port + required: true + port_range_max: + description: + - Ending port + required: true + remote_ip_prefix: + description: + - Source IP address(es) in CIDR notation (exclusive with remote_group) + required: false + remote_group: + description: + - ID of Security group to link (exclusive with remote_ip_prefix) + required: false + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + +requirements: ["shade"] +''' +# TODO(mordred): add ethertype and direction + +EXAMPLES = ''' +# Create a security group rule +- os_security_group_rule: cloud=mordred + security_group=group foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 +''' + + +def _security_group_rule(module, nova_client, action='create', **kwargs): + f = getattr(nova_client.security_group_rules, action) + try: + secgroup = f(**kwargs) + except Exception, e: + module.fail_json(msg='Failed to %s security group rule: %s' % + (action, e.message)) + + +def _get_rule_from_group(module, secgroup): + for rule in secgroup.rules: + if (rule['ip_protocol'] == module.params['protocol'] and + rule['from_port'] == module.params['port_range_min'] and + rule['to_port'] == module.params['port_range_max'] and + rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): + return rule + return None + +def main(): + + argument_spec = 
openstack_full_argument_spec( + security_group = dict(required=True), + protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix = dict(required=False, default=None), + # TODO(mordred): Make remote_group handle name and id + remote_group = dict(required=False, default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['remote_ip_prefix', 'remote_group'], + ) + module = AnsibleModule(argument_spec, **module_kwargs) + + try: + cloud = shade.openstack_cloud(**module.params) + nova_client = cloud.nova_client + changed = False + + secgroup = cloud.get_security_group(module.params['security_group']) + + if module.params['state'] == 'present': + if not secgroup: + module.fail_json(msg='Could not find security group %s' % + module.params['security_group']) + + if not _get_rule_from_group(module, secgroup): + _security_group_rule(module, nova_client, 'create', + parent_group_id=secgroup.id, + ip_protocol=module.params['protocol'], + from_port=module.params['port_range_min'], + to_port=module.params['port_range_max'], + cidr=module.params['remote_ip'], + group_id=module.params['remote_group'], + changed = True + + + if module.params['state'] == 'absent' and secgroup: + rule = _get_rule_from_group(module, secgroup) + if secgroup and rule: + _security_group_rule(module, nova_client, 'delete', + rule=rule['id']) + changed = True + + module.exit_json(changed=changed, result="success") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 08b4bb42c4ad99a9c43193b40f62220240da0af8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 4 Jun 2015 12:03:05 -0400 Subject: [PATCH 135/386] Fix example code 
syntax --- cloud/openstack/os_security_group_rule.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index d539cf91ee5..8422a920791 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -68,12 +68,13 @@ requirements: ["shade"] EXAMPLES = ''' # Create a security group rule -- os_security_group_rule: cloud=mordred - security_group=group foo - protocol: tcp - port_range_min: 80 - port_range_max: 80 - remote_ip_prefix: 0.0.0.0/0 +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 ''' From a9301ba918736db08cf5b2b160f91ed4724ba7e8 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 17:34:21 +0100 Subject: [PATCH 136/386] Fix invalid syntax in openstack_module_kwargs call --- cloud/openstack/os_security_group_rule.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 8422a920791..903d694bab3 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -111,6 +111,7 @@ def main(): module_kwargs = openstack_module_kwargs( mutually_exclusive=[ ['remote_ip_prefix', 'remote_group'], + ] ) module = AnsibleModule(argument_spec, **module_kwargs) From d35df1f2170f8347af4b49548a03d265c9b69e15 Mon Sep 17 00:00:00 2001 From: dagnello Date: Mon, 8 Jun 2015 18:27:40 -0700 Subject: [PATCH 137/386] Minor fixes for os_security_group_rule module Was not able to use this module as it was. The changes submitted resolved the issues I ran into in order to get it working. 
--- cloud/openstack/os_security_group_rule.py | 24 +++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 903d694bab3..fc5397439c0 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -21,13 +21,12 @@ try: except ImportError: print("failed=True msg='shade is required for this module'") - DOCUMENTATION = ''' --- module: os_security_group_rule short_description: Add/Delete rule from an existing security group extends_documentation_fragment: openstack -version_added: "1.10" +version_added: "2.0" description: - Add or Remove rule from an existing security group options: @@ -61,7 +60,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - requirements: ["shade"] ''' # TODO(mordred): add ethertype and direction @@ -84,7 +82,7 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): secgroup = f(**kwargs) except Exception, e: module.fail_json(msg='Failed to %s security group rule: %s' % - (action, e.message)) + (action, e.message)) def _get_rule_from_group(module, secgroup): @@ -92,12 +90,14 @@ def _get_rule_from_group(module, secgroup): if (rule['ip_protocol'] == module.params['protocol'] and rule['from_port'] == module.params['port_range_min'] and rule['to_port'] == module.params['port_range_max'] and - rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): + (rule['ip_range']['cidr'] if 'cidr' in rule['ip_range'] + else None) == (module.params['remote_ip_prefix'] if + 'remote_ip_prefix' in module.params else None)): return rule return None -def main(): +def main(): argument_spec = openstack_full_argument_spec( security_group = dict(required=True), protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), @@ -133,11 +133,14 @@ def main(): ip_protocol=module.params['protocol'], 
from_port=module.params['port_range_min'], to_port=module.params['port_range_max'], - cidr=module.params['remote_ip'], - group_id=module.params['remote_group'], + cidr=module.params['remote_ip_prefix'] + if 'remote_ip_prefix' in module.params else None, + group_id=module.params['remote_group'] + if 'remote_group' in module.params else + None + ) changed = True - if module.params['state'] == 'absent' and secgroup: rule = _get_rule_from_group(module, secgroup) if secgroup and rule: @@ -153,4 +156,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() + +main() \ No newline at end of file From b98e6663e8cdd29eecb6614ae12a814df972441e Mon Sep 17 00:00:00 2001 From: dagnello Date: Mon, 8 Jun 2015 18:27:40 -0700 Subject: [PATCH 138/386] Minor fixes for os_security_group_rule module Was not able to use this module as it was. The changes submitted resolved the issues I ran into in order to get it working. 
--- cloud/openstack/os_security_group_rule.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index fc5397439c0..efbc41f1148 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -99,14 +99,14 @@ def _get_rule_from_group(module, secgroup): def main(): argument_spec = openstack_full_argument_spec( - security_group = dict(required=True), - protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), - remote_ip_prefix = dict(required=False, default=None), + security_group = dict(required=True), + protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id - remote_group = dict(required=False, default=None), - state = dict(default='present', choices=['absent', 'present']), + remote_group = dict(required=False, default=None), + state = dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs( mutually_exclusive=[ @@ -157,4 +157,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() \ No newline at end of file +main() From 8f2e70a1c156bde17a117c5a212c71d4d67ccd8f Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 17:31:43 -0400 Subject: [PATCH 139/386] Update rules mode for latest shade Shade 0.7.0 normalized the security group data that is returned, when using nova, to look more like neutron security group data. This adjusts for that change. 
--- cloud/openstack/os_security_group_rule.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index efbc41f1148..42e0a6bc6ed 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -86,13 +86,11 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): def _get_rule_from_group(module, secgroup): - for rule in secgroup.rules: - if (rule['ip_protocol'] == module.params['protocol'] and - rule['from_port'] == module.params['port_range_min'] and - rule['to_port'] == module.params['port_range_max'] and - (rule['ip_range']['cidr'] if 'cidr' in rule['ip_range'] - else None) == (module.params['remote_ip_prefix'] if - 'remote_ip_prefix' in module.params else None)): + for rule in secgroup['security_group_rules']: + if (rule['protocol'] == module.params['protocol'] and + rule['port_range_min'] == module.params['port_range_min'] and + rule['port_range_max'] == module.params['port_range_max'] and + rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): return rule return None @@ -136,9 +134,8 @@ def main(): cidr=module.params['remote_ip_prefix'] if 'remote_ip_prefix' in module.params else None, group_id=module.params['remote_group'] - if 'remote_group' in module.params else - None - ) + if 'remote_group' in module.params else None + ) changed = True if module.params['state'] == 'absent' and secgroup: From 5758b4ebdce00bbd18ef8e6967122ebcc6de0cde Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 17:51:59 -0400 Subject: [PATCH 140/386] Fix id value reference --- cloud/openstack/os_security_group_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 42e0a6bc6ed..287f3021a35 100644 --- a/cloud/openstack/os_security_group_rule.py +++ 
b/cloud/openstack/os_security_group_rule.py @@ -127,7 +127,7 @@ def main(): if not _get_rule_from_group(module, secgroup): _security_group_rule(module, nova_client, 'create', - parent_group_id=secgroup.id, + parent_group_id=secgroup['id'], ip_protocol=module.params['protocol'], from_port=module.params['port_range_min'], to_port=module.params['port_range_max'], From 5b6c6cac20bc6e1111e0175b2e77c7c3f61a69b5 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 18:06:11 -0400 Subject: [PATCH 141/386] Recongnize None and -1 port equivalency shade 0.7.0 represents disabled min/max ports as None (in the neutron style) rather than -1. Recognize this as the same as -1. --- cloud/openstack/os_security_group_rule.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 287f3021a35..64f67fbeec1 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -87,9 +87,12 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): def _get_rule_from_group(module, secgroup): for rule in secgroup['security_group_rules']: + # No port, or -1, will be returned as None + port_range_min = rule['port_range_min'] or -1 + port_range_max = rule['port_range_max'] or -1 if (rule['protocol'] == module.params['protocol'] and - rule['port_range_min'] == module.params['port_range_min'] and - rule['port_range_max'] == module.params['port_range_max'] and + port_range_min == module.params['port_range_min'] and + port_range_max == module.params['port_range_max'] and rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): return rule return None From 16b3b72294ec509b7d327af73ce750c9c25c437a Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 16 Jun 2015 14:56:04 -0400 Subject: [PATCH 142/386] Update secgroup rules module for latest shade This allows the rules module to work against either nova or 
neutron for handling security groups. New parameters for 'direction' and 'ethertype' are added. Check mode is supported with this version. --- cloud/openstack/os_security_group_rule.py | 141 ++++++++++++++-------- 1 file changed, 93 insertions(+), 48 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 64f67fbeec1..a5596558710 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -46,15 +46,27 @@ options: port_range_max: description: - Ending port - required: true + required: true remote_ip_prefix: description: - Source IP address(es) in CIDR notation (exclusive with remote_group) - required: false + required: false remote_group: description: - ID of Security group to link (exclusive with remote_ip_prefix) - required: false + required: false + ethertype: + description: + - Must be IPv4 or IPv6, and addresses represented in CIDR must + match the ingress or egress rules. Not all providers support IPv6. + choices: ['IPv4', 'IPv6'] + default: IPv4 + direction: + description: + - The direction in which the security group rule is applied. Not + all providers support egress. + choices: ['egress', 'ingress'] + default: ingress state: description: - Should the resource be present or absent. @@ -76,79 +88,112 @@ EXAMPLES = ''' ''' -def _security_group_rule(module, nova_client, action='create', **kwargs): - f = getattr(nova_client.security_group_rules, action) - try: - secgroup = f(**kwargs) - except Exception, e: - module.fail_json(msg='Failed to %s security group rule: %s' % - (action, e.message)) +def _find_matching_rule(module, secgroup): + """ + Find a rule in the group that matches the module parameters. + :returns: The matching rule dict, or None if no matches. 
+ """ + protocol = module.params['protocol'] + port_range_min = module.params['port_range_min'] + port_range_max = module.params['port_range_max'] + remote_ip_prefix = module.params['remote_ip_prefix'] + ethertype = module.params['ethertype'] + direction = module.params['direction'] -def _get_rule_from_group(module, secgroup): for rule in secgroup['security_group_rules']: - # No port, or -1, will be returned as None - port_range_min = rule['port_range_min'] or -1 - port_range_max = rule['port_range_max'] or -1 - if (rule['protocol'] == module.params['protocol'] and - port_range_min == module.params['port_range_min'] and - port_range_max == module.params['port_range_max'] and - rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): + # No port, or -1, will be returned from shade as None + rule_port_range_min = rule['port_range_min'] or -1 + rule_port_range_max = rule['port_range_max'] or -1 + + if (protocol == rule['protocol'] + and port_range_min == rule_port_range_min + and port_range_max == rule_port_range_max + and remote_ip_prefix == rule['remote_ip_prefix'] + and ethertype == rule['ethertype'] + and direction == rule['direction']): return rule return None +def _system_state_change(module, secgroup): + state = module.params['state'] + if secgroup: + rule_exists = _find_matching_rule(module, secgroup) + else: + return False + + if state == 'present' and not rule_exists: + return True + if state == 'absent' and rule_exists: + return True + return False + + def main(): argument_spec = openstack_full_argument_spec( - security_group = dict(required=True), - protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), - remote_ip_prefix = dict(required=False, default=None), + security_group = dict(required=True), + protocol = dict(default='tcp', + choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix 
= dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id - remote_group = dict(required=False, default=None), - state = dict(default='present', choices=['absent', 'present']), + remote_group = dict(required=False, default=None), + ethertype = dict(default='IPv4', + choices=['IPv4', 'IPv6']), + direction = dict(default='ingress', + choices=['egress', 'ingress']), + state = dict(default='present', + choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs( mutually_exclusive=[ ['remote_ip_prefix', 'remote_group'], ] ) - module = AnsibleModule(argument_spec, **module_kwargs) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + state = module.params['state'] + security_group = module.params['security_group'] + changed = False try: cloud = shade.openstack_cloud(**module.params) - nova_client = cloud.nova_client - changed = False + secgroup = cloud.get_security_group(security_group) - secgroup = cloud.get_security_group(module.params['security_group']) + if module.check_mode: + module.exit_json(changed=_system_state_change(module, secgroup)) - if module.params['state'] == 'present': + if state == 'present': if not secgroup: module.fail_json(msg='Could not find security group %s' % - module.params['security_group']) + security_group) - if not _get_rule_from_group(module, secgroup): - _security_group_rule(module, nova_client, 'create', - parent_group_id=secgroup['id'], - ip_protocol=module.params['protocol'], - from_port=module.params['port_range_min'], - to_port=module.params['port_range_max'], - cidr=module.params['remote_ip_prefix'] - if 'remote_ip_prefix' in module.params else None, - group_id=module.params['remote_group'] - if 'remote_group' in module.params else None - ) + if not _find_matching_rule(module, secgroup): + cloud.create_security_group_rule( + secgroup['id'], + port_range_min=module.params['port_range_min'], + port_range_max=module.params['port_range_max'], + 
protocol=module.params['protocol'], + remote_ip_prefix=module.params['remote_ip_prefix'], + remote_group_id=module.params['remote_group'], + direction=module.params['direction'], + ethertype=module.params['ethertype'] + ) changed = True - if module.params['state'] == 'absent' and secgroup: - rule = _get_rule_from_group(module, secgroup) - if secgroup and rule: - _security_group_rule(module, nova_client, 'delete', - rule=rule['id']) + if state == 'absent' and secgroup: + rule = _find_matching_rule(module, secgroup) + if rule: + cloud.delete_security_group_rule(rule['id']) changed = True - module.exit_json(changed=changed, result="success") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 0e5942d7e7bfd703fad5797362a0ebe1572674e6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 07:39:27 -0400 Subject: [PATCH 143/386] Return rule object --- cloud/openstack/os_security_group_rule.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index a5596558710..15ce00866ae 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -174,8 +174,9 @@ def main(): module.fail_json(msg='Could not find security group %s' % security_group) - if not _find_matching_rule(module, secgroup): - cloud.create_security_group_rule( + rule = _find_matching_rule(module, secgroup): + if not rule: + rule = cloud.create_security_group_rule( secgroup['id'], port_range_min=module.params['port_range_min'], port_range_max=module.params['port_range_max'], @@ -186,6 +187,7 @@ def main(): ethertype=module.params['ethertype'] ) changed = True + module.exit_json(changed=changed, rule=rule, id=rule.id) if state == 'absent' and secgroup: rule = _find_matching_rule(module, secgroup) @@ -193,7 +195,7 @@ def main(): cloud.delete_security_group_rule(rule['id']) changed = True - 
module.exit_json(changed=changed) + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 9d0c8b0507a19e82a5dc23c9a8a8cac0b24c9f92 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 17 Jun 2015 08:30:55 -0400 Subject: [PATCH 144/386] Fix syntax error --- cloud/openstack/os_security_group_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 15ce00866ae..f50c97817e5 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -174,7 +174,7 @@ def main(): module.fail_json(msg='Could not find security group %s' % security_group) - rule = _find_matching_rule(module, secgroup): + rule = _find_matching_rule(module, secgroup) if not rule: rule = cloud.create_security_group_rule( secgroup['id'], From f027e759765e9dd7717b54c308ad1d46410c2cff Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 18 Jun 2015 19:22:56 -0400 Subject: [PATCH 145/386] Compare ports as strings Ports as returned from shade are ints. They are strings as they come in to the module. 
--- cloud/openstack/os_security_group_rule.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index f50c97817e5..eea47c0c3e1 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -103,8 +103,16 @@ def _find_matching_rule(module, secgroup): for rule in secgroup['security_group_rules']: # No port, or -1, will be returned from shade as None - rule_port_range_min = rule['port_range_min'] or -1 - rule_port_range_max = rule['port_range_max'] or -1 + if rule['port_range_min'] is None: + rule_port_range_min = "-1" + else: + rule_port_range_min = str(rule['port_range_min']) + + if rule['port_range_max'] is None: + rule_port_range_max = "-1" + else: + rule_port_range_max = str(rule['port_range_max']) + if (protocol == rule['protocol'] and port_range_min == rule_port_range_min From 2e8daa23309ada2bfba8415ea6ec5d764b565f05 Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 11:21:51 -0700 Subject: [PATCH 146/386] Resolving issues in rule comparison algorithm Port range min/max values are at times represented as string and compared to int equivalents. This fix explicitly ensures all port range values are ints for proper comparisons. --- cloud/openstack/os_security_group_rule.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index eea47c0c3e1..fc9283940c0 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -91,12 +91,11 @@ EXAMPLES = ''' def _find_matching_rule(module, secgroup): """ Find a rule in the group that matches the module parameters. - :returns: The matching rule dict, or None if no matches. 
""" protocol = module.params['protocol'] - port_range_min = module.params['port_range_min'] - port_range_max = module.params['port_range_max'] + port_range_min = int(module.params['port_range_min']) + port_range_max = int(module.params['port_range_max']) remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] @@ -104,14 +103,14 @@ def _find_matching_rule(module, secgroup): for rule in secgroup['security_group_rules']: # No port, or -1, will be returned from shade as None if rule['port_range_min'] is None: - rule_port_range_min = "-1" + rule_port_range_min = -1 else: - rule_port_range_min = str(rule['port_range_min']) + rule_port_range_min = int(rule['port_range_min']) if rule['port_range_max'] is None: - rule_port_range_max = "-1" + rule_port_range_max = -1 else: - rule_port_range_max = str(rule['port_range_max']) + rule_port_range_max = int(rule['port_range_max']) if (protocol == rule['protocol'] @@ -195,7 +194,7 @@ def main(): ethertype=module.params['ethertype'] ) changed = True - module.exit_json(changed=changed, rule=rule, id=rule.id) + module.exit_json(changed=changed, rule=rule, id=rule['id']) if state == 'absent' and secgroup: rule = _find_matching_rule(module, secgroup) @@ -212,4 +211,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +main() \ No newline at end of file From 9f03302b68b4038fa664230dcbb66920325dbd1f Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:17:46 -0400 Subject: [PATCH 147/386] Use int in the parameter list instead of casting --- cloud/openstack/os_security_group_rule.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index fc9283940c0..7d86408b379 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -94,8 +94,8 @@ 
def _find_matching_rule(module, secgroup): :returns: The matching rule dict, or None if no matches. """ protocol = module.params['protocol'] - port_range_min = int(module.params['port_range_min']) - port_range_max = int(module.params['port_range_max']) + port_range_min = module.params['port_range_min'] + port_range_max = module.params['port_range_max'] remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] @@ -142,8 +142,8 @@ def main(): security_group = dict(required=True), protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), + port_range_min = dict(required=True, type='int'), + port_range_max = dict(required=True, type='int'), remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id remote_group = dict(required=False, default=None), @@ -211,4 +211,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() \ No newline at end of file +main() From 8664c884174736b803089c3d4a199461dff0af9e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 30 Jun 2015 16:51:18 -0400 Subject: [PATCH 148/386] Change required parameters for rules module The ports and protocol are no longer required (and now depends on a new version of shade). 
--- cloud/openstack/os_security_group_rule.py | 55 ++++++++++++++++++++--- 1 file changed, 49 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 7d86408b379..2ec8e49b68d 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -18,8 +18,10 @@ try: import shade + HAS_SHADE = True except ImportError: - print("failed=True msg='shade is required for this module'") + HAS_SHADE = False + DOCUMENTATION = ''' --- @@ -87,6 +89,41 @@ EXAMPLES = ''' remote_ip_prefix: 0.0.0.0/0 ''' +RETURN = ''' +id: + description: Unique rule UUID. + type: string +direction: + description: The direction in which the security group rule is applied. + type: string + sample: 'egress' +ethertype: + description: One of IPv4 or IPv6. + type: string + sample: 'IPv4' +port_range_min: + description: The minimum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +port_range_max: + description: The maximum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +protocol: + description: The protocol that is matched by the security group rule. + type: string + sample: 'tcp' +remote_ip_prefix: + description: The remote IP prefix to be associated with this security group rule. + type: string + sample: '0.0.0.0/0' +security_group_id: + description: The security group ID to associate with this security group rule. 
+ type: string +''' + def _find_matching_rule(module, secgroup): """ @@ -140,10 +177,12 @@ def _system_state_change(module, secgroup): def main(): argument_spec = openstack_full_argument_spec( security_group = dict(required=True), - protocol = dict(default='tcp', - choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True, type='int'), - port_range_max = dict(required=True, type='int'), + # NOTE(Shrews): None is an acceptable protocol value for + # Neutron, but Nova will balk at this. + protocol = dict(default=None, + choices=[None, 'tcp', 'udp', 'icmp']), + port_range_min = dict(required=False, type='int'), + port_range_max = dict(required=False, type='int'), remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id remote_group = dict(required=False, default=None), @@ -165,6 +204,9 @@ def main(): supports_check_mode=True, **module_kwargs) + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + state = module.params['state'] security_group = module.params['security_group'] changed = False @@ -211,4 +253,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From 6933407cd40bb655eaaa6847336421018a6b9b1e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 6 Jul 2015 12:16:29 -0400 Subject: [PATCH 149/386] Correct port matching logic Port matching logic did not take into account recent shade change to equate (None, None) to (1, 65535) when Nova is the backend. Also, this encapsulates the port matching logic into a single function and heavily documents the logic. 
--- cloud/openstack/os_security_group_rule.py | 102 ++++++++++++++++++---- 1 file changed, 84 insertions(+), 18 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 2ec8e49b68d..7e0486d81db 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -76,7 +76,6 @@ options: default: present requirements: ["shade"] ''' -# TODO(mordred): add ethertype and direction EXAMPLES = ''' # Create a security group rule @@ -87,6 +86,38 @@ EXAMPLES = ''' port_range_min: 80 port_range_max: 80 remote_ip_prefix: 0.0.0.0/0 + +# Create a security group rule for ping +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the ping rule +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +# Create a TCP rule covering all ports +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 1 + port_range_max: 65535 + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the TCP rule above (defaults to all ports) +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 ''' RETURN = ''' @@ -125,37 +156,72 @@ security_group_id: ''' +def _ports_match(protocol, module_min, module_max, rule_min, rule_max): + """ + Capture the complex port matching logic. + + The port values coming in for the module might be -1 (for ICMP), + which will work only for Nova, but this is handled by shade. Likewise, + they might be None, which works for Neutron, but not Nova. This too is + handled by shade. Since shade will consistently return these port + values as None, we need to convert any -1 values input to the module + to None here for comparison. 
+ + For TCP and UDP protocols, None values for both min and max are + represented as the range 1-65535 for Nova, but remain None for + Neutron. Shade returns the full range when Nova is the backend (since + that is how Nova stores them), and None values for Neutron. If None + values are input to the module for both values, then we need to adjust + for comparison. + """ + + # Check if the user is supplying -1 for ICMP. + if protocol == 'icmp': + if module_min and int(module_min) == -1: + module_min = None + if module_max and int(module_max) == -1: + module_max = None + + # Check if user is supplying None values for full TCP/UDP port range. + if protocol in ['tcp', 'udp'] and module_min is None and module_max is None: + if (rule_min and int(rule_min) == 1 + and rule_max and int(rule_max) == 65535): + # (None, None) == (1, 65535) + return True + + # Sanity check to make sure we don't have type comparison issues. + if module_min: + module_min = int(module_min) + if module_max: + module_max = int(module_max) + if rule_min: + rule_min = int(rule_min) + if rule_max: + rule_max = int(rule_max) + + return module_min == rule_min and module_max == rule_max + + def _find_matching_rule(module, secgroup): """ Find a rule in the group that matches the module parameters. :returns: The matching rule dict, or None if no matches. 
""" protocol = module.params['protocol'] - port_range_min = module.params['port_range_min'] - port_range_max = module.params['port_range_max'] remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] for rule in secgroup['security_group_rules']: - # No port, or -1, will be returned from shade as None - if rule['port_range_min'] is None: - rule_port_range_min = -1 - else: - rule_port_range_min = int(rule['port_range_min']) - - if rule['port_range_max'] is None: - rule_port_range_max = -1 - else: - rule_port_range_max = int(rule['port_range_max']) - - if (protocol == rule['protocol'] - and port_range_min == rule_port_range_min - and port_range_max == rule_port_range_max and remote_ip_prefix == rule['remote_ip_prefix'] and ethertype == rule['ethertype'] - and direction == rule['direction']): + and direction == rule['direction'] + and _ports_match(protocol, + module.params['port_range_min'], + module.params['port_range_max'], + rule['port_range_min'], + rule['port_range_max'])): return rule return None From dd9c29286154d7643c0392d576e44ea4421ada3c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 6 Jul 2015 18:52:11 -0400 Subject: [PATCH 150/386] Update docstring to show port ranges as optional --- cloud/openstack/os_security_group_rule.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 7e0486d81db..91059aca015 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -39,16 +39,18 @@ options: protocol: description: - IP protocol - choices: ['tcp', 'udp', 'icmp'] - default: tcp + choices: ['tcp', 'udp', 'icmp', None] + default: None port_range_min: description: - Starting port - required: true + required: false + default: None port_range_max: description: - Ending port - required: true + required: false + default: None 
remote_ip_prefix: description: - Source IP address(es) in CIDR notation (exclusive with remote_group) From 20f76d76a84635dbc62254b635fa31ebf4e8e96d Mon Sep 17 00:00:00 2001 From: Benjamin Baumer Date: Mon, 29 Jun 2015 13:45:08 +0200 Subject: [PATCH 151/386] Fix: Calling svn info to determine if dest is an svn Working Copy, to support updates in Subfolders with Subversion > 1.8 Fix: Ignoring svn:externals on local Modification Check. Add: Added Argument switch to alow skipping the svn switch call. --- source_control/subversion.py | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index cae4702e174..84154d6dff0 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -78,6 +78,13 @@ options: version_added: "1.6" description: - If C(yes), do export instead of checkout/update. + switch: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "1.6" + description: + - If C(no), do not call svn switch before update. 
''' EXAMPLES = ''' @@ -103,7 +110,7 @@ class Subversion(object): self.password = password self.svn_path = svn_path - def _exec(self, args): + def _exec(self, args, check_rc=True): bits = [ self.svn_path, '--non-interactive', @@ -115,8 +122,20 @@ class Subversion(object): if self.password: bits.extend(["--password", self.password]) bits.extend(args) - rc, out, err = self.module.run_command(bits, check_rc=True) - return out.splitlines() + if check_rc: + rc, out, err = self.module.run_command(bits, check_rc) + return out.splitlines() + else: + rc, out, err = self.module.run_command(bits, check_rc) + return rc + + def is_svn_repo(self): + '''Checks if path is a SVN Repo.''' + rc = self._exec(["info", self.dest], check_rc=False) + if rc == 0: + return True + else: + return False def checkout(self): '''Creates new svn working directory if it does not already exist.''' @@ -153,8 +172,9 @@ class Subversion(object): def has_local_mods(self): '''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' - lines = self._exec(["status", "--quiet", self.dest]) + lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest]) # The --quiet option will return only modified files. + # Has local mods if more than 0 modifed revisioned files. 
return len(filter(len, lines)) > 0 @@ -183,6 +203,7 @@ def main(): password=dict(required=False), executable=dict(default=None), export=dict(default=False, required=False, type='bool'), + switch=dict(default=True, required=False, type='bool'), ), supports_check_mode=True ) @@ -195,6 +216,7 @@ def main(): password = module.params['password'] svn_path = module.params['executable'] or module.get_bin_path('svn', True) export = module.params['export'] + switch = module.params['switch'] os.environ['LANG'] = 'C' svn = Subversion(module, dest, repo, revision, username, password, svn_path) @@ -208,7 +230,7 @@ def main(): svn.checkout() else: svn.export(force=force) - elif os.path.exists("%s/.svn" % (dest, )): + elif svn.is_svn_repo(): # Order matters. Need to get local mods before switch to avoid false # positives. Need to switch before revert to ensure we are reverting to # correct repo. @@ -217,7 +239,8 @@ def main(): module.exit_json(changed=check, before=before, after=after) before = svn.get_revision() local_mods = svn.has_local_mods() - svn.switch() + if switch: + svn.switch() if local_mods: if force: svn.revert() From 8255657ac9d2f9e1622ecd877109132c1db3c5f4 Mon Sep 17 00:00:00 2001 From: Benjamin Baumer Date: Tue, 7 Jul 2015 13:38:19 +0200 Subject: [PATCH 152/386] version_added for switch Parameter changed to 2.0 Add comment to explain check_rc Parameter in _exec Function Optimize code and clean up is_svn_repo Function --- source_control/subversion.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index 84154d6dff0..24cc065c5a4 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -82,7 +82,7 @@ options: required: false default: "yes" choices: [ "yes", "no" ] - version_added: "1.6" + version_added: "2.0" description: - If C(no), do not call svn switch before update. 
''' @@ -111,6 +111,7 @@ class Subversion(object): self.svn_path = svn_path def _exec(self, args, check_rc=True): + '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.''' bits = [ self.svn_path, '--non-interactive', @@ -122,20 +123,16 @@ class Subversion(object): if self.password: bits.extend(["--password", self.password]) bits.extend(args) + rc, out, err = self.module.run_command(bits, check_rc) if check_rc: - rc, out, err = self.module.run_command(bits, check_rc) return out.splitlines() else: - rc, out, err = self.module.run_command(bits, check_rc) return rc def is_svn_repo(self): '''Checks if path is a SVN Repo.''' rc = self._exec(["info", self.dest], check_rc=False) - if rc == 0: - return True - else: - return False + return rc == 0 def checkout(self): '''Creates new svn working directory if it does not already exist.''' From b4911a47d1589693791bdf40ed979e239d69d6d2 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Tue, 7 Jul 2015 16:31:47 +0300 Subject: [PATCH 153/386] Change the default flag value to None to prevent AWS complaining: "Instance creation failed => InvalidBlockDeviceMapping: the encrypted flag cannot be specified since device /dev/sda1 has a snapshot specified." 
--- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index b79395fb3a1..840cf4fed1f 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -701,7 +701,7 @@ def create_block_device(module, ec2, volume): volume_type=volume.get('device_type'), delete_on_termination=volume.get('delete_on_termination', False), iops=volume.get('iops'), - encrypted=volume.get('encrypted', False)) + encrypted=volume.get('encrypted', None)) def boto_supports_param_in_spot_request(ec2, param): """ From c57d70ad207e1c5a935f7af7a93a890f44e364d1 Mon Sep 17 00:00:00 2001 From: Sean Chittenden Date: Tue, 7 Jul 2015 12:06:52 -0700 Subject: [PATCH 154/386] Fix group mod and group add for FreeBSD --- system/group.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/system/group.py b/system/group.py index 53ab5f904dc..d97dd2176ac 100644 --- a/system/group.py +++ b/system/group.py @@ -233,7 +233,8 @@ class FreeBsdGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] if self.gid is not None: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % int(self.gid)) return self.execute_command(cmd) def group_mod(self, **kwargs): @@ -241,7 +242,8 @@ class FreeBsdGroup(Group): info = self.group_info() cmd_len = len(cmd) if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % int(self.gid)) # modify the group if cmd will do anything if cmd_len != len(cmd): if self.module.check_mode: From d9db201b9a60dd7680694ae9af113ae21082306b Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Tue, 7 Jul 2015 20:30:07 +0100 Subject: [PATCH 155/386] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8d1b7946688..863755aab26 100644 --- 
a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be run with state). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. + - Specifies if the VM should be deployed from a template (mutually exclusive with state parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: From d6d6186aef83ea807a25124e6fd423d7df42a998 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Tue, 7 Jul 2015 20:30:33 +0100 Subject: [PATCH 156/386] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 863755aab26..5f5925b994d 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (mutually exclusive with state parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. + - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. 
default: no choices: ['yes', 'no'] template_src: From 76398781bac86caf6006e67a77a917155a02f3b4 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Tue, 7 Jul 2015 15:29:47 -0700 Subject: [PATCH 157/386] Fix up docs --- cloud/openstack/os_floating_ip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 5bd29240a67..10827012ae8 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -64,12 +64,12 @@ options: - When attaching a floating IP address, specify whether we should wait for it to appear as attached. required: false - default false + default: false timeout: description: - Time to wait for an IP address to appear as attached. See wait. required: false - default 60 + default: 60 state: description: - Should the resource be present or absent. From de89f9f99a9e2679f3f8ab8aaf84afb163f4c375 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Tue, 7 Jul 2015 16:10:44 -0700 Subject: [PATCH 158/386] Plumb ipv6 modes into os_subnet Shade already supports these, we just need to plumb them into the module code. --- cloud/openstack/os_subnet.py | 38 ++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index f96ce9fd633..b62eb10b0cc 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -92,6 +92,18 @@ options: - A list of host route dictionaries for the subnet. 
required: false default: None + ipv6_ra_mode: + description: + - IPv6 router advertisement mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None + ipv6_address_mode: + description: + - IPv6 address mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None requirements: - "python >= 2.6" - "shade" @@ -117,6 +129,19 @@ EXAMPLES = ''' - os_subnet: state=absent name=net1subnet + +# Create an ipv6 stateless subnet +- os_subnet: + state: present + name: intv6 + network_name: internal + ip_version: 6 + cidr: 2db8:1::/64 + dns_nameservers: + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + ipv6_ra_mode: dhcpv6-stateless + ipv6_address_mode: dhcpv6-stateless ''' @@ -163,6 +188,7 @@ def _system_state_change(module, subnet): def main(): + ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] argument_spec = openstack_full_argument_spec( name=dict(required=True), network_name=dict(default=None), @@ -174,6 +200,8 @@ def main(): allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), host_routes=dict(default=None, type='list'), + ipv6_ra_mode=dict(default=None, choice=ipv6_mode_choices), + ipv6_address_mode=dict(default=None, choice=ipv6_mode_choices), state=dict(default='present', choices=['absent', 'present']), ) @@ -196,6 +224,8 @@ def main(): pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] host_routes = module.params['host_routes'] + ipv6_ra_mode = module.params['ipv6_ra_mode'] + ipv6_a_mode = module.params['ipv6_address_mode'] # Check for required parameters when state == 'present' if state == 'present': @@ -226,7 +256,9 @@ def main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes) + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) changed = True else: if _needs_update(subnet, module): @@ -236,7 +268,9 @@ def 
main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes) + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) changed = True else: changed = False From 706f5e25cc7c045b817cc940a439b57b3c570a06 Mon Sep 17 00:00:00 2001 From: Nic O'Connor Date: Sat, 31 Jan 2015 15:24:44 +0000 Subject: [PATCH 159/386] Added the ability to Linked_clone from snapshot --- cloud/vmware/vsphere_guest.py | 37 +++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8ad7df41dea..58393ecb754 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -79,6 +79,18 @@ options: description: - Name of the source template to deploy from default: None + linked_clone: + version_added: "2.0" + description: + - Boolean. Creates a linked clone copy of the specified vm requires snapshot + required: false + default: false + snapshot: + version_added: "2.0" + description: + - Name of the snapshot you want to link clone from + required: false + default: none vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. 
@@ -513,7 +525,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, linked_clone, snapshot): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -545,9 +557,14 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False + elif linked_clone and snapshot != None: + #Check linked_clone and snapshot value + vmTemplate.clone(guest, resourcepool=rpmor, linked=linked_clone, snapshot=snapshot) + changed = True else: vmTemplate.clone(guest, resourcepool=rpmor) changed = True + vsphere_client.disconnect() module.exit_json(changed=changed) except Exception as e: @@ -1148,9 +1165,11 @@ def main(): 'reconfigured' ], default='present'), - vmware_guest_facts=dict(required=False, choices=BOOLEANS), - from_template=dict(required=False, choices=BOOLEANS), + vmware_guest_facts=dict(required=False, type='bool'), + from_template=dict(required=False, type='bool'), template_src=dict(required=False, type='str'), + linked_clone=dict(required=False, default=False, type='bool'), + snapshot=dict(required=False, default=None, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1159,7 +1178,7 @@ def main(): vm_hw_version=dict(required=False, default=None, type='str'), resource_pool=dict(required=False, default=None, type='str'), cluster=dict(required=False, default=None, type='str'), - force=dict(required=False, choices=BOOLEANS, default=False), + force=dict(required=False, type='bool', default=False), esxi=dict(required=False, type='dict', default={}), @@ -1176,8 +1195,9 @@ def main(): 'esxi' ], ['resource_pool', 'cluster'], - ['from_template', 'resource_pool', 'template_src'] + 
['from_template', 'resource_pool', 'template_src'], ], + required_if=[('linked_clone', True, ['snapshot'])], ) if not HAS_PYSPHERE: @@ -1200,6 +1220,9 @@ def main(): cluster = module.params['cluster'] template_src = module.params['template_src'] from_template = module.params['from_template'] + linked_clone = module.params['linked_clone'] + snapshot = module.params['snapshot'] + # CONNECT TO THE SERVER viserver = VIServer() @@ -1279,7 +1302,9 @@ def main(): guest=guest, template_src=template_src, module=module, - cluster_name=cluster + cluster_name=cluster, + linked_clone=linked_clone, + snapshot=snapshot ) if state in ['restarted', 'reconfigured']: module.fail_json( From e33f0930753876be85f8f851e3cfdc81e219a73c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 9 Jul 2015 00:27:44 -0400 Subject: [PATCH 160/386] added ignore hidden to assemble --- files/assemble.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/files/assemble.py b/files/assemble.py index 1f9a952d04a..ad73c7b4354 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -79,6 +79,12 @@ options: U(http://docs.python.org/2/library/re.html). required: false default: null + ignore_hidden: + description: + - A boolean that controls if files that start with a '.' will be included or not. 
+ required: false + default: false + version_added: "2.0" author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: files ''' @@ -94,7 +100,7 @@ EXAMPLES = ''' # =========================================== # Support method -def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): +def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') @@ -105,7 +111,7 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): + if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(fragment).read() @@ -148,6 +154,7 @@ def main(): backup=dict(default=False, type='bool'), remote_src=dict(default=False, type='bool'), regexp = dict(required=False), + ignore_hidden = dict(default=False, type='bool'), ), add_file_common_args=True ) @@ -162,6 +169,7 @@ def main(): delimiter = module.params['delimiter'] regexp = module.params['regexp'] compiled_regexp = None + ignore_hidden = module.params['ignore_hidden'] if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) @@ -175,7 +183,7 @@ def main(): except re.error, e: module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) - path = assemble_from_fragments(src, delimiter, compiled_regexp) + path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden) path_hash = module.sha1(path) if os.path.exists(dest): From f190f98b06a00abc3a3ba0432a52ec44a1924f86 Mon Sep 17 00:00:00 2001 From: "Hennadiy (Gena) Verkh" Date: Thu, 9 Jul 2015 11:39:46 +0200 Subject: [PATCH 161/386] Update uri.py Added methods 'TRACE', 'CONNECT' from https://www.rfc-editor.org/rfc/rfc2616.txt, section 5.1.1 
Added method 'REFRESH' --- network/basics/uri.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 7be1cc92159..1e70d319fd0 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -73,6 +73,7 @@ options: description: - The HTTP method of the request or response. required: false + choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ] default: "GET" return_content: description: @@ -340,7 +341,7 @@ def main(): password = dict(required=False, default=None), body = dict(required=False, default=None), body_format = dict(required=False, default='raw', choices=['raw', 'json']), - method = dict(required=False, default='GET'), + method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']), return_content = dict(required=False, default='no', type='bool'), force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), From fbb6277a37faa78ab8a01dee0e7877af372234ce Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 9 Jul 2015 09:33:10 -0400 Subject: [PATCH 162/386] Fix a small typo in parameter processing --- cloud/openstack/os_client_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 2c4af5c8c08..7128b06ffcb 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -61,7 +61,7 @@ def main(): config = os_client_config.OpenStackConfig() clouds = [] for cloud in config.get_all_clouds(): - if not module.params['clouds'] or cloud.name in module.param['clouds']: + if not p['clouds'] or cloud.name in p['clouds']: cloud.config['name'] = cloud.name clouds.append(cloud.config) 
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) From 164043bd6db6c40fcece8614f8fce3fb66cdeed7 Mon Sep 17 00:00:00 2001 From: vanga Date: Thu, 9 Jul 2015 20:42:54 +0530 Subject: [PATCH 163/386] Throw error if encryption is set while passing a snapshot id --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 840cf4fed1f..a6b378c7e9c 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -692,6 +692,8 @@ def create_block_device(module, ec2, volume): size = volume.get('volume_size', snapshot.volume_size) if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) + if 'encrypted' in volume: + module.fail_json(msg = 'You can not set encyrption when creating a volume from a snapshot') if 'ephemeral' in volume: if 'snapshot' in volume: module.fail_json(msg = 'Cannot set both ephemeral and snapshot') @@ -702,7 +704,6 @@ def create_block_device(module, ec2, volume): delete_on_termination=volume.get('delete_on_termination', False), iops=volume.get('iops'), encrypted=volume.get('encrypted', None)) - def boto_supports_param_in_spot_request(ec2, param): """ Check if Boto library has a in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0. 
From d46c036b75a82a07c42731154677512d069c4386 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 9 Jul 2015 08:16:17 -0700 Subject: [PATCH 164/386] Add notes about loop squashing and 1.9.2 change to install packages in one yum transaction --- packaging/os/yum.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 14339b4c18b..29d6b0100dc 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -118,10 +118,22 @@ options: choices: ["yes", "no"] aliases: [] -notes: [] +notes: + - When used with a loop of package names in a playbook, ansible optimizes + the call to the yum module. Instead of calling the module with a single + package each time through the loop, ansible calls the module once with all + of the package names from the loop. + - In versions prior to 1.9.2 this module installed and removed each package + given to the yum module separately. This caused problems when packages + specified by filename or url had to be installed or removed together. In + 1.9.2 this was fixed so that packages are installed in one yum + transaction. However, if one of the packages adds a new yum repository + that the other packages come from (such as epel-release) then that package + needs to be installed in a separate task. This mimics yum's command line + behaviour. 
# informational: requirements for nodes requirements: [ yum ] -author: +author: - "Ansible Core Team" - "Seth Vidal" ''' From 59225ca7b021611ace3f1212cfd578c0cd520559 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 9 Jul 2015 10:43:11 -0400 Subject: [PATCH 165/386] ensure password or ssh cert specified --- cloud/azure/azure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index f1eea46525e..c4fa41a6eb1 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -567,8 +567,8 @@ def main(): module.fail_json(msg='location parameter is required for new instance') if not module.params.get('storage_account'): module.fail_json(msg='storage_account parameter is required for new instance') - if not module.params.get('password'): - module.fail_json(msg='password parameter is required for new instance') + if not (module.params.get('password') or module.params.get('ssh_cert_path')): + module.fail_json(msg='password or ssh_cert_path parameter is required for new instance') (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) From 68bd17b15e94a74cc70ebb49d6161bbb0254c487 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Thu, 9 Jul 2015 15:29:00 -0400 Subject: [PATCH 166/386] Adding default cooldown to AWS ASG --- cloud/amazon/ec2_asg.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index eaeb141825e..5cf0282011c 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -109,6 +109,12 @@ options: default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] + default_cooldown: + description: + The number of seconds after a scaling activity completes before another can begin. 
+ required: false + default: 300 seconds + version_added: "2.0" wait_timeout: description: - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. @@ -374,6 +380,7 @@ def create_autoscaling_group(connection, module): set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') + default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') @@ -413,7 +420,8 @@ def create_autoscaling_group(connection, module): connection=connection, tags=asg_tags, health_check_period=health_check_period, - health_check_type=health_check_type) + health_check_type=health_check_type, + default_cooldown=default_cooldown) try: connection.create_auto_scaling_group(ag) @@ -774,6 +782,7 @@ def main(): tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), + default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True) ), ) From 9144785c42c082172aeb72544ec68503e39d788c Mon Sep 17 00:00:00 2001 From: Nicholas O'Connor Date: Thu, 9 Jul 2015 16:46:39 -0400 Subject: [PATCH 167/386] Created option snapshot_to_clone. When specified, snapshot_to_clone will create a linked clone copy of the VM. 
--- cloud/vmware/vsphere_guest.py | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 58393ecb754..8ddc0fa7e50 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -79,16 +79,10 @@ options: description: - Name of the source template to deploy from default: None - linked_clone: - version_added: "2.0" + snapshot_to_clone: + version_added 2.0 description: - - Boolean. Creates a linked clone copy of the specified vm requires snapshot - required: false - default: false - snapshot: - version_added: "2.0" - description: - - Name of the snapshot you want to link clone from + - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. required: false default: none vm_disk: @@ -525,7 +519,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, linked_clone, snapshot): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -557,9 +551,9 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False - elif linked_clone and snapshot != None: - #Check linked_clone and snapshot value - vmTemplate.clone(guest, resourcepool=rpmor, linked=linked_clone, snapshot=snapshot) + elif snapshot_to_clone != None: + #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. 
+ vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone) changed = True else: vmTemplate.clone(guest, resourcepool=rpmor) @@ -1168,8 +1162,7 @@ def main(): vmware_guest_facts=dict(required=False, type='bool'), from_template=dict(required=False, type='bool'), template_src=dict(required=False, type='str'), - linked_clone=dict(required=False, default=False, type='bool'), - snapshot=dict(required=False, default=None, type='str'), + snapshot_to_clone=dict(required=False, default=None, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1197,7 +1190,6 @@ def main(): ['resource_pool', 'cluster'], ['from_template', 'resource_pool', 'template_src'], ], - required_if=[('linked_clone', True, ['snapshot'])], ) if not HAS_PYSPHERE: @@ -1220,8 +1212,7 @@ def main(): cluster = module.params['cluster'] template_src = module.params['template_src'] from_template = module.params['from_template'] - linked_clone = module.params['linked_clone'] - snapshot = module.params['snapshot'] + snapshot_to_clone = module.params['snapshot_to_clone'] # CONNECT TO THE SERVER @@ -1303,8 +1294,7 @@ def main(): template_src=template_src, module=module, cluster_name=cluster, - linked_clone=linked_clone, - snapshot=snapshot + snapshot_to_clone=snapshot_to_clone ) if state in ['restarted', 'reconfigured']: module.fail_json( From 440b395f377972e7c9de66f74ad1875f51c50fd0 Mon Sep 17 00:00:00 2001 From: Chris Faulkner Date: Thu, 9 Jul 2015 13:04:13 -0700 Subject: [PATCH 168/386] Report change status on django_manage collectstatic. 
--- web_infrastructure/django_manage.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 201cd08303b..2637446d6f3 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -165,6 +165,9 @@ def syncdb_filter_output(line): def migrate_filter_output(line): return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) +def collectstatic_filter_output(line): + return "0 static files" not in line + def main(): command_allowed_param_map = dict( cleanup=(), From 4b1b10fa20217cb2e22d88f94d0b176a49dceebb Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Tue, 7 Oct 2014 13:04:34 +0300 Subject: [PATCH 169/386] Refactor force basic auth, now all modules which use fetch_url() can use force_basic_auth --- network/basics/get_url.py | 12 ++++++++++++ network/basics/uri.py | 10 ---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 646c0e42784..9ab039ebb4b 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -110,6 +110,15 @@ options: parameter is not specified, the C(url_password) parameter will not be used. required: false version_added: '1.6' + force_basic_auth: + description: + - httplib2, the library used by the uri module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly + send a 401, logins will fail. This option forces the sending of the Basic authentication header + upon initial request. 
+ required: false + choices: [ "yes", "no" ] + default: "no" others: description: - all arguments accepted by the M(file) module also work here @@ -125,6 +134,9 @@ EXAMPLES=''' - name: download file with sha256 check get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c + +- name: download file and force basic auth + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes ''' import urlparse diff --git a/network/basics/uri.py b/network/basics/uri.py index 8095eaffe67..bd1557c7a0f 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -23,7 +23,6 @@ import cgi import shutil import tempfile -import base64 import datetime try: import json @@ -369,7 +368,6 @@ def main(): body_format = dict(required=False, default='raw', choices=['raw', 'json']), method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), return_content = dict(required=False, default='no', type='bool'), - force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), creates = dict(required=False, default=None), removes = dict(required=False, default=None), @@ -394,7 +392,6 @@ def main(): method = module.params['method'] dest = module.params['dest'] return_content = module.params['return_content'] - force_basic_auth = module.params['force_basic_auth'] redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] @@ -434,13 +431,6 @@ def main(): module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) - # httplib2 only sends authentication after the server asks for it with a 401. - # Some 'basic auth' servies fail to send a 401 and require the authentication - # up front. 
This creates the Basic authentication header and sends it immediately. - if force_basic_auth: - dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) - - # Make the request resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) From afd02221845cea71f21d01a3d0a00d00e6548648 Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Fri, 10 Jul 2015 08:42:01 +0300 Subject: [PATCH 170/386] uri.py is not using module_utils/urls.py from ansible core --- network/basics/uri.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/network/basics/uri.py b/network/basics/uri.py index bd1557c7a0f..8095eaffe67 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -23,6 +23,7 @@ import cgi import shutil import tempfile +import base64 import datetime try: import json @@ -368,6 +369,7 @@ def main(): body_format = dict(required=False, default='raw', choices=['raw', 'json']), method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), return_content = dict(required=False, default='no', type='bool'), + force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), creates = dict(required=False, default=None), removes = dict(required=False, default=None), @@ -392,6 +394,7 @@ def main(): method = module.params['method'] dest = module.params['dest'] return_content = module.params['return_content'] + force_basic_auth = module.params['force_basic_auth'] redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] @@ -431,6 +434,13 @@ def main(): module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) + # httplib2 only sends authentication 
after the server asks for it with a 401. + # Some 'basic auth' servies fail to send a 401 and require the authentication + # up front. This creates the Basic authentication header and sends it immediately. + if force_basic_auth: + dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) + + # Make the request resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) From 9acf10face033dda6d5b1f570fb35cbd3deabac5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 13:51:04 -0400 Subject: [PATCH 171/386] Correctly default crypt_scheme in htpasswd --- web_infrastructure/htpasswd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index e567a776559..361a131ef2d 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -190,7 +190,7 @@ def main(): path=dict(required=True, aliases=["dest", "destfile"]), name=dict(required=True, aliases=["username"]), password=dict(required=False, default=None), - crypt_scheme=dict(required=False, default=None), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), state=dict(required=False, default="present"), create=dict(type='bool', default='yes'), From 10df7b97eebe358d4ee716f76aa401587f023f0b Mon Sep 17 00:00:00 2001 From: Joel Thompson Date: Wed, 20 May 2015 15:39:17 -0400 Subject: [PATCH 172/386] Adding ability to filter AWS Route 53 private hosted zones by attached VPC --- cloud/amazon/route53.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index f9702cc38ae..e3f6c42735a 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -138,6 +138,15 @@ options: required: false default: null version_added: "2.0" + vpc_id: + 
description: + - When used in conjunction with private_zone: true, this will only modify + records in the private hosted zone attached to this VPC. This allows you + to have multiple private hosted zones, all with the same name, attached + to different VPCs. + required: false + default: null + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' @@ -250,14 +259,26 @@ try: except ImportError: HAS_BOTO = False -def get_zone_by_name(conn, module, zone_name, want_private, zone_id): +def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id): """Finds a zone by name or zone_id""" for zone in conn.get_zones(): # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params private_zone = module.boolean(zone.config.get('PrivateZone', False)) if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id): - return zone + if want_vpc_id: + # NOTE: These details aren't available in other boto methods, hence the necessary + # extra API call + zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse'] + # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 + if isinstance(zone_details['VPCs'], dict): + if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: + return zone + else: # Forward compatibility for when boto fixes that bug + if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: + return zone + else: + return zone return None @@ -295,6 +316,7 @@ def main(): region = dict(required=False), health_check = dict(required=False), failover = dict(required=False), + vpc_id = dict(required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -318,6 +340,7 @@ def main(): region_in = module.params.get('region') health_check_in = module.params.get('health_check') failover_in = module.params.get('failover') + vpc_id_in = 
module.params.get('vpc_id') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -344,6 +367,11 @@ def main(): elif not alias_hosted_zone_id_in: module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") + if vpc_id_in and not private_zone_in: + module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter" + " 'vpc_id'") + + # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) @@ -351,7 +379,7 @@ def main(): module.fail_json(msg = e.error_message) # Find the named zone ID - zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in) + zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in) # Verify that the requested zone is already defined in Route53 if zone is None: From b106a83d0a4225e20754179b71010a600bd8bc77 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 10 Jul 2015 16:33:00 -0400 Subject: [PATCH 173/386] Set force=yes as the default, add force parameter to module docs. 
--- windows/win_get_url.ps1 | 2 +- windows/win_get_url.py | 22 ++++++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 02f19b39360..46979c129f2 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,7 +40,7 @@ Else { Fail-Json $result "missing required argument: dest" } -$force = Get-Attr -obj $params -name "force" "no" | ConvertTo-Bool +$force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 585d3e2aa81..a34f23890b5 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -27,20 +27,28 @@ module: win_get_url version_added: "1.7" short_description: Fetches a file from a given URL description: - - Fetches a file from a URL and saves to locally + - Fetches a file from a URL and saves to locally options: url: description: - The full URL of a file to download required: true default: null - aliases: [] dest: description: - - The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate. + - The absolute path of the location to save the file at the URL. Be sure + to include a filename and extension as appropriate. + required: true + default: null + force: + description: + - If C(yes), will always download the file. If C(no), will only + download the file if it does not exist or the remote file has been + modified more recently than the local file. 
+ version_added: "2.0" required: false + choices: [ "yes", "no" ] default: yes - aliases: [] author: "Paul Durivage (@angstwad)" ''' @@ -54,4 +62,10 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthr win_get_url: url: 'http://www.example.com/earthrise.jpg' dest: 'C:\Users\RandomUser\earthrise.jpg' + +- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified + win_get_url: + url: 'http://www.example.com/earthrise.jpg' + dest: 'C:\Users\RandomUser\earthrise.jpg' + force: no ''' From c0e4c50eebc579a89a8377b0e84864d206c49937 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Thu, 9 Jul 2015 13:38:14 -0700 Subject: [PATCH 174/386] s3 module: Add missing version tag to "encrypt" parameter --- cloud/amazon/s3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 095befe173a..7b6990e25e3 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -50,6 +50,7 @@ options: - When set for PUT mode, asks for server-side encryption required: false default: no + version_added: "2.0" expiration: description: - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. 
From ab5b5e881973530bea1a48c353e731cb013e7464 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 00:15:02 -0400 Subject: [PATCH 175/386] corrected version added --- database/mysql/mysql_variables.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index f50ed740539..a2ab0767b55 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -53,7 +53,7 @@ options: - mysql host to connect required: False login_port: - version_added: "1.9" + version_added: "2.0" description: - mysql port to connect required: False From a24ffc105636ebec2db80b05696dafeb9cc2979f Mon Sep 17 00:00:00 2001 From: Nikolay Ivanko Date: Mon, 13 Jul 2015 14:31:39 +0300 Subject: [PATCH 176/386] add virtual floppy to VMware guest --- cloud/vmware/vsphere_guest.py | 62 +++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index f0239544cec..732f1d13108 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -165,6 +165,9 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" + vm_floppy: + type: "image" + image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -357,6 +360,44 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) +def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): + # Add a floppy + # Make sure the datastore exists. 
+ if vm_floppy_image_path: + image_location = vm_floppy_image_path.split('/', 1) + datastore, ds = find_datastore( + module, s, image_location[0], config_target) + image_path = image_location[1] + + floppy_spec = config.new_deviceChange() + floppy_spec.set_element_operation('add') + floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() + + if type == "image": + image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() + ds_ref = image.new_datastore(ds) + ds_ref.set_attribute_type(ds.get_attribute_type()) + image.set_element_datastore(ds_ref) + image.set_element_fileName("%s %s" % (datastore, image_path)) + floppy_ctrl.set_element_backing(image) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + elif type == "client": + client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( + "client").pyclass() + client.set_element_deviceName("/dev/fd0") + floppy_ctrl.set_element_backing(client) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + else: + s.disconnect() + module.fail_json( + msg="Error adding floppy of type %s to vm spec. " + " floppy type can either be image or client" % (type)) + + devices.append(floppy_spec) + + def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -922,6 +963,27 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) + if 'vm_floppy' in vm_hardware: + floppy_image_path = None + floppy_type = None + try: + floppy_type = vm_hardware['vm_floppy']['type'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy type needs to be" + " specified." 
% vm_hardware['vm_floppy']) + if floppy_type == 'image': + try: + floppy_image_path = vm_hardware['vm_floppy']['image_path'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy image_path needs" + " to be specified." % vm_hardware['vm_floppy']) + # Add a floppy to the VM. + add_floppy(module, vsphere_client, config_target, config, devices, + default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From a7f33ee5333eed9f50f32819f113b2a469dc4570 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 16:45:41 +0200 Subject: [PATCH 177/386] Remove default for engine_version Redis and memcached have different engine version numbering, there can not be a shared default value. --- cloud/amazon/elasticache.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 3ec0fc2e351..f163ad312e8 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -42,7 +42,6 @@ options: description: - The version number of the cache engine required: false - default: 1.4.14 node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -477,7 +476,7 @@ def main(): state={'required': True, 'choices': ['present', 'absent', 'rebooted']}, name={'required': True}, engine={'required': False, 'default': 'memcached'}, - cache_engine_version={'required': False, 'default': '1.4.14'}, + cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, cache_port={'required': False, 'default': 11211, 'type': 'int'}, From a85640c36883ca6083efbfdebd963bfb72f5bad9 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 16:52:30 +0200 Subject: [PATCH 178/386] Update elasticache.py --- cloud/amazon/elasticache.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index f163ad312e8..6586a018a8d 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -42,6 +42,7 @@ options: description: - The version number of the cache engine required: false + default: none node_type: description: - The compute and memory capacity of the nodes in the cache cluster From 300656ca070bb9f5861535b4ffb030b8192c8502 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 17:21:16 +0200 Subject: [PATCH 179/386] Remove default port value, it does not work in _requires_destroy_and_create logic When creating a Redis cluster, every run it gets destroyed and recreated because the port number of memcached is used as the default. --- cloud/amazon/elasticache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 6586a018a8d..31ed4696628 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -56,7 +56,7 @@ options: description: - The port number on which each of the cache nodes will accept connections required: false - default: 11211 + default: none cache_subnet_group: description: - The subnet group name to associate with. Only use if inside a vpc. 
Required if inside a vpc @@ -480,7 +480,7 @@ def main(): cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, - cache_port={'required': False, 'default': 11211, 'type': 'int'}, + cache_port={'required': False, 'type': 'int'}, cache_subnet_group={'required': False, 'default': None}, cache_security_groups={'required': False, 'default': [default], 'type': 'list'}, From 9e11f5fd1534480877f2a7a73339c538962fefa8 Mon Sep 17 00:00:00 2001 From: Mitchell Ludwig Date: Mon, 13 Jul 2015 17:51:32 -0600 Subject: [PATCH 180/386] Improved stat documentation --- files/stat.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/files/stat.py b/files/stat.py index 5f79874d9fd..2e088fc8dbd 100644 --- a/files/stat.py +++ b/files/stat.py @@ -58,6 +58,23 @@ EXAMPLES = ''' - fail: msg="Whoops! file ownership has changed" when: st.stat.pw_name != 'root' +# Determine if a path exists and is a symlink. Note that if the path does +# not exist, and we test sym.stat.islnk, it will fail with an error. So +# therefore, we must test whether it is defined. +# Run this to understand the structure, the skipped ones do not pass the +# check performed by 'when' +- stat: path=/path/to/something + register: sym +- debug: msg="islnk isn't defined (path doesn't exist)" + when: sym.stat.islnk is not defined +- debug: msg="islnk is defined (path must exist)" + when: sym.stat.islnk is defined +- debug: msg="Path exists and is a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk +- debug: msg="Path exists and isn't a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk == False + + # Determine if a path exists and is a directory. Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. 
- stat: path=/path/to/something From e916b04e91d02f7fd5d30dccb7b9eee922b3040a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 10:18:37 -0400 Subject: [PATCH 181/386] Also document in example that unarchive download was added in 2.0 --- files/unarchive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index 3ee83de0dcd..2b373a8e7fb 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -83,7 +83,7 @@ EXAMPLES = ''' # Unarchive a file that is already on the remote machine - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no -# Unarchive a file that needs to be downloaded +# Unarchive a file that needs to be downloaded (added in 2.0) - unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no ''' From e6ecca8809e9263170ab6abdd7398e5540dcb58b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Jul 2015 07:27:09 -0700 Subject: [PATCH 182/386] Minor touch ups of vsphere_guest code. --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b390facda2f..002ef44664e 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -80,7 +80,7 @@ options: - Name of the source template to deploy from default: None snapshot_to_clone: - version_added 2.0 + version_added "2.0" description: - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. required: false @@ -619,7 +619,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False - elif snapshot_to_clone != None: + elif snapshot_to_clone is not None: #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. 
vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone) changed = True From 01d4c432b004fae9f6bcd9cef45c4d669879d888 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 11:39:40 -0400 Subject: [PATCH 183/386] Revert "ec2_lc - include all launch config properties in the return" --- cloud/amazon/ec2_lc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 0721b4e203d..818e8efbb50 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -241,8 +241,7 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=result.instance_type, - result=result) + security_groups=result.security_groups, instance_type=instance_type) def delete_launch_config(connection, module): From e1067ef670063b188fa8e8994faa89296f2a72ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 12:02:03 -0400 Subject: [PATCH 184/386] Revert "Revert "ec2_lc - include all launch config properties in the return"" --- cloud/amazon/ec2_lc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 818e8efbb50..0721b4e203d 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -241,7 +241,8 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) + security_groups=result.security_groups, instance_type=result.instance_type, + result=result) def delete_launch_config(connection, module): From b80ec0a33544dbea868c2548002db6f749401a70 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 
2015 16:54:49 -0400 Subject: [PATCH 185/386] fixed minor doc issues --- cloud/amazon/ec2_asg.py | 2 +- cloud/amazon/route53.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 5cf0282011c..efcd66606b8 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -111,7 +111,7 @@ options: choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. + - The number of seconds after a scaling activity completes before another can begin. required: false default: 300 seconds version_added: "2.0" diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index e3f6c42735a..c659843b9a3 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -140,10 +140,8 @@ options: version_added: "2.0" vpc_id: description: - - When used in conjunction with private_zone: true, this will only modify - records in the private hosted zone attached to this VPC. This allows you - to have multiple private hosted zones, all with the same name, attached - to different VPCs. + - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC." + - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs. 
required: false default: null version_added: "2.0" From 291fef3b34ea5510f031816d9c569f54098b8bec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 17:03:21 -0400 Subject: [PATCH 186/386] fixed version added, reworded description a bit --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 002ef44664e..91f479549d9 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -80,9 +80,9 @@ options: - Name of the source template to deploy from default: None snapshot_to_clone: - version_added "2.0" description: - - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. + - A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter. + version_added: "2.0" required: false default: none vm_disk: From dd691779a1ab60127c6109015226dca18879e7a0 Mon Sep 17 00:00:00 2001 From: otdw Date: Tue, 14 Jul 2015 15:56:14 -0700 Subject: [PATCH 187/386] removed required together for resource pools, clusters, and template deployments. fixes inability to deploy from template on vsphere clusters without resource pools. 
Also, resource pools and cluster should not be required together as they are independent in vsphere --- cloud/vmware/vsphere_guest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 91f479549d9..9ed6ede21c2 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1256,8 +1256,7 @@ def main(): 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'], - ['from_template', 'resource_pool', 'template_src'], + ['from_template', 'template_src'], ], ) From 985cdf2c281c78a9f0861ed8c2f77752b3854812 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Sat, 6 Jun 2015 00:09:56 +0200 Subject: [PATCH 188/386] Add module parameter for security group name. This makes ec2_elb_lb module consistent with others --- cloud/amazon/ec2_elb_lb.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 04be9e2813c..504efff10e7 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -56,6 +56,11 @@ options: require: false default: None version_added: "1.6" + security_group_names: + description: + - A list of security group names to apply to the elb + require: false + default: None health_check: description: - An associative array of health check configuration settings (see example) @@ -361,7 +366,8 @@ class ElbManager(object): if not check_elb: info = { 'name': self.name, - 'status': self.status + 'status': self.status, + 'region': self.region } else: try: @@ -389,6 +395,7 @@ class ElbManager(object): 'out_of_service_count': 0, 'in_service_count': 0, 'unknown_instance_state_count': 0 + 'region': self.region } # status of instances behind the ELB @@ -816,6 +823,7 @@ def main(): zones={'default': None, 'required': False, 'type': 'list'}, purge_zones={'default': False, 'required': False, 'type': 'bool'}, security_group_ids={'default': None, 'required': False, 'type': 
'list'}, + security_group_names={'default': None, 'required': False, 'type': 'list'}, health_check={'default': None, 'required': False, 'type': 'dict'}, subnets={'default': None, 'required': False, 'type': 'list'}, purge_subnets={'default': False, 'required': False, 'type': 'bool'}, @@ -844,6 +852,7 @@ def main(): zones = module.params['zones'] purge_zones = module.params['purge_zones'] security_group_ids = module.params['security_group_ids'] + security_group_names = module.params['security_group_names'] health_check = module.params['health_check'] subnets = module.params['subnets'] purge_subnets = module.params['purge_subnets'] @@ -858,6 +867,23 @@ def main(): if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") + if security_group_ids and security_group_names: + module.fail_json(msg = str("Use only one type of parameter (security_group_ids) or (security_group_names)")) + elif security_group_names: + security_group_ids = [] + try: + ec2 = ec2_connect(module) + grp_details = ec2.get_all_security_groups() + + for group_name in security_group_names: + if isinstance(group_name, basestring): + group_name = [group_name] + + group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] + security_group_ids.extend(group_id) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + elb_man = ElbManager(module, name, listeners, purge_listeners, zones, purge_zones, security_group_ids, health_check, subnets, purge_subnets, scheme, From 959c65c7e05665f1c8779307784f350c82e6fa6a Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Sun, 7 Jun 2015 00:04:15 +0200 Subject: [PATCH 189/386] Add version --- cloud/amazon/ec2_elb_lb.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 504efff10e7..2b8c76cefc3 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ 
b/cloud/amazon/ec2_elb_lb.py @@ -61,6 +61,7 @@ options: - A list of security group names to apply to the elb require: false default: None + version_added: "2.0" health_check: description: - An associative array of health check configuration settings (see example) @@ -73,7 +74,7 @@ options: aliases: ['aws_region', 'ec2_region'] subnets: description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. required: false default: None aliases: [] @@ -82,7 +83,7 @@ options: description: - Purge existing subnet on ELB that are not found in subnets required: false - default: false + default: false version_added: "1.7" scheme: description: @@ -152,7 +153,7 @@ EXAMPLES = """ name: "test-vpc" scheme: internal state: present - subnets: + subnets: - subnet-abcd1234 - subnet-1a2b3c4d listeners: @@ -218,7 +219,7 @@ EXAMPLES = """ instance_port: 80 purge_zones: yes -# Creates a ELB and assigns a list of subnets to it. +# Creates a ELB and assigns a list of subnets to it. - local_action: module: ec2_elb_lb state: present @@ -302,10 +303,10 @@ class ElbManager(object): """Handles ELB creation and destruction""" def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, + zones=None, purge_zones=None, security_group_ids=None, health_check=None, subnets=None, purge_subnets=None, scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, + cross_az_load_balancing=None, stickiness=None, region=None, **aws_connect_params): self.module = module @@ -449,7 +450,7 @@ class ElbManager(object): else: info['cross_az_load_balancing'] = 'no' - # return stickiness info? + # return stickiness info? 
return info @@ -629,7 +630,7 @@ class ElbManager(object): self._attach_subnets(subnets_to_attach) if subnets_to_detach: self._detach_subnets(subnets_to_detach) - + def _set_zones(self): """Determine which zones need to be enabled or disabled on the ELB""" if self.zones: @@ -734,7 +735,7 @@ class ElbManager(object): else: self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) self.changed = True - + self._set_listener_policy(listeners_dict, policy) def select_stickiness_policy(self): @@ -801,7 +802,7 @@ class ElbManager(object): else: self._set_listener_policy(listeners_dict) - + def _get_health_check_target(self): """Compose target string from healthcheck parameters""" protocol = self.health_check['ping_protocol'].upper() From 6d6da470c8310b9bc5f846387f4dbb359b2d31b3 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Thu, 9 Jul 2015 23:42:44 +0200 Subject: [PATCH 190/386] Fix missing , --- cloud/amazon/ec2_elb_lb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 2b8c76cefc3..ce353527f5a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -395,7 +395,7 @@ class ElbManager(object): 'instances': [instance.id for instance in check_elb.instances], 'out_of_service_count': 0, 'in_service_count': 0, - 'unknown_instance_state_count': 0 + 'unknown_instance_state_count': 0, 'region': self.region } From e3d42562830755faa246da172da7e690f0a81792 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Wed, 15 Jul 2015 10:45:38 +0200 Subject: [PATCH 191/386] Use mutually_exclusive in AnsibleModule --- cloud/amazon/ec2_elb_lb.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index ce353527f5a..f2a04863923 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -837,6 +837,7 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + mutually_exclusive 
= [['security_group_ids', 'security_group_names']] ) if not HAS_BOTO: @@ -868,9 +869,7 @@ def main(): if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - if security_group_ids and security_group_names: - module.fail_json(msg = str("Use only one type of parameter (security_group_ids) or (security_group_names)")) - elif security_group_names: + if security_group_names: security_group_ids = [] try: ec2 = ec2_connect(module) From 0ca732baafab8b347a322481f1ad296eea9ce929 Mon Sep 17 00:00:00 2001 From: Mischa ter Smitten Date: Wed, 15 Jul 2015 12:00:23 +0200 Subject: [PATCH 192/386] The tilde expansion doesn't work with user.home --- system/user.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/user.py b/system/user.py index 7c3fa4c8594..33a3ba24d37 100644 --- a/system/user.py +++ b/system/user.py @@ -271,6 +271,9 @@ class User(object): self.update_password = module.params['update_password'] self.expires = None + if module.params['home'] is not None: + self.home = os.path.expanduser(module.params['home']) + if module.params['expires']: try: self.expires = time.gmtime(module.params['expires']) From 68a3c6ece478e044facc189f9154cad28ea4489e Mon Sep 17 00:00:00 2001 From: Matthias Frey Date: Wed, 15 Jul 2015 16:20:01 +0200 Subject: [PATCH 193/386] assemble: add file validation support. --- files/assemble.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/files/assemble.py b/files/assemble.py index ad73c7b4354..73d4214eb9e 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -85,6 +85,13 @@ options: required: false default: false version_added: "2.0" + validate: + description: + - The validation command to run before copying into place. The path to the file to + validate is passed in via '%s' which must be present as in the sshd example below. + The command is passed securely so shell features like expansion and pipes won't work. 
+ required: false + default: "" author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: files ''' @@ -95,6 +102,9 @@ EXAMPLES = ''' # When a delimiter is specified, it will be inserted in between each fragment - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' + +# Copy a new "sshd_config" file into place, after passing validation with sshd +- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='sshd -t -f %s' ''' # =========================================== @@ -155,6 +165,7 @@ def main(): remote_src=dict(default=False, type='bool'), regexp = dict(required=False), ignore_hidden = dict(default=False, type='bool'), + validate = dict(required=False, type='str'), ), add_file_common_args=True ) @@ -170,6 +181,7 @@ def main(): regexp = module.params['regexp'] compiled_regexp = None ignore_hidden = module.params['ignore_hidden'] + validate = module.params.get('validate', None) if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) @@ -192,6 +204,13 @@ def main(): if path_hash != dest_hash: if backup and dest_hash is not None: module.backup_local(dest) + if validate: + if "%s" not in validate: + module.fail_json(msg="validate must contain %%s: %s" % validate) + (rc, out, err) = module.run_command(validate % path) + if rc != 0: + module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err)) + shutil.copy(path, dest) changed = True From b7e92b3e52431df7a59dfcddc414486bc8c6a6c5 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 15 Jul 2015 21:05:13 -0400 Subject: [PATCH 194/386] docker: fix parsing of docker __version__ string If `docker.__version__` contains non-digit characters, such as: >>> import docker >>> docker.__version__ '1.4.0-dev' Then `get_docker_py_versioninfo` will fail with: ValueError: invalid literal for int() with base 10: '0-de' This patch corrects the parsing of the version string so that `get_docker_py_versioninfo` in this 
example would return: (1, 4, 0, '-dev') --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2bbbbd158a2..c9f967757ef 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -481,6 +481,7 @@ def get_docker_py_versioninfo(): if not char.isdigit(): nondigit = part[idx:] digit = part[:idx] + break if digit: version.append(int(digit)) if nondigit: From 6672205f49907fb65ab5e103c9a20b502a1333e5 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 15 Jul 2015 21:11:01 -0400 Subject: [PATCH 195/386] docker: permit empty or false pid The `docker` Python module only accepts `None` or `'host'` as arguments. This makes it difficult to conditionally set the `pid` attribute using standard Ansible syntax. This change converts any value that evaluates as boolean `False` to `None`, which includes empty strings: pid: As well as an explicit `false`: pid: false This permits the following to work as intended: - hosts: localhost tasks: - name: starting container docker: docker_api_version: 1.18 image: larsks/mini-httpd name: web pid: "{{ container_pid|default('') }}" If `container_pid` is set to `host` somewhere, this will create a Docker container with `pid=host`; otherwise, this will create a container with normal isolated pid namespace. 
--- cloud/docker/docker.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2bbbbd158a2..71505e258fe 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1270,6 +1270,10 @@ class DockerManager(object): if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] From a9e8cae82e50ee15ee2f1f93dc9ff5d78a85ead1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 11:36:40 -0400 Subject: [PATCH 196/386] attempt to fix check mode when state='absent' --- cloud/amazon/ec2_key.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index a9217bd69db..b59c50034d6 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -127,25 +127,23 @@ def main(): if state == 'absent': if key: '''found a match, delete it''' - try: - key.delete() - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if not ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: - module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) - else: - key = None - changed = True - else: - '''no match found, no changes required''' + if not module.check_mode: + try: + key.delete() + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if not ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out 
while waiting for the key to be removed") + except Exception, e: + module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) + key = None + changed = True # Ensure requested key is present elif state == 'present': From 8e7d9be02bc1f4f12dc538684e3353a8f8883b97 Mon Sep 17 00:00:00 2001 From: Andrew Briening Date: Thu, 25 Jun 2015 16:52:23 -0400 Subject: [PATCH 197/386] Adds basic authentication & skip certificate validation to win_get_url module --- windows/win_get_url.ps1 | 17 +++++++++++++++++ windows/win_get_url.py | 17 ++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 46979c129f2..525854eae87 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,11 +40,23 @@ Else { Fail-Json $result "missing required argument: dest" } +$skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $false | ConvertTo-Bool +$username = Get-Attr $params "username" +$password = Get-Attr $params "password" + +if($skip_certificate_validation){ + [System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} +} + $force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient + if($username -and $password){ + $client.Credentials = New-Object System.Net.NetworkCredential($username, $password) + } + Try { $client.DownloadFile($url, $dest) $result.changed = $true @@ -56,6 +68,11 @@ If ($force -or -not (Test-Path $dest)) { Else { Try { $webRequest = [System.Net.HttpWebRequest]::Create($url) + + if($username -and $password){ + $webRequest.Credentials = New-Object System.Net.NetworkCredential($username, $password) + } + $webRequest.IfModifiedSince = ([System.IO.FileInfo]$dest).LastWriteTime $webRequest.Method = "GET" [System.Net.HttpWebResponse]$webResponse = $webRequest.GetResponse() diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 
a34f23890b5..5c3e994d418 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -28,6 +28,7 @@ version_added: "1.7" short_description: Fetches a file from a given URL description: - Fetches a file from a URL and saves to locally +author: "Paul Durivage (@angstwad)" options: url: description: @@ -49,7 +50,21 @@ options: required: false choices: [ "yes", "no" ] default: yes -author: "Paul Durivage (@angstwad)" + username: + description: + - Basic authentication username + required: false + default: null + password: + description: + - Basic authentication password + required: false + default: null + skip_certificate_validation: + description: + - Skip SSL certificate validation if true + required: false + default: false ''' EXAMPLES = ''' From 625fb1e182db778b6e67b0dc1f46001c1b23a565 Mon Sep 17 00:00:00 2001 From: Andrew Briening Date: Thu, 16 Jul 2015 15:01:09 -0400 Subject: [PATCH 198/386] Show the exception messages --- windows/win_get_url.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 525854eae87..18977bff1ef 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -62,7 +62,7 @@ If ($force -or -not (Test-Path $dest)) { $result.changed = $true } Catch { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } Else { @@ -85,11 +85,11 @@ Else { } Catch [System.Net.WebException] { If ($_.Exception.Response.StatusCode -ne [System.Net.HttpStatusCode]::NotModified) { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } Catch { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } From 444a2ad808d3f794b1e646e391b7352c5373675b Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 16 Jul 2015 15:25:39 -0400 Subject: 
[PATCH 199/386] Do not erroneously mask exceptions There was a catch-all `except` statement in `create_containers`: try: containers = do_create(count, params) except: self.pull_image() containers = do_create(count, params) This would mask a variety of errors that should be exposed, including API compatibility errors (as in #1707) and common Python exceptions (KeyError, ValueError, etc) that could result from errors in the code. This change makes the `except` statement more specific, and only attempts to pull the image and start a container if the original create attempt failed due to a 404 error from the docker API. --- cloud/docker/docker.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 71505e258fe..131ac59e0ae 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -389,6 +389,7 @@ from urlparse import urlparse try: import docker.client import docker.utils + import docker.errors from requests.exceptions import RequestException except ImportError: HAS_DOCKER_PY = False @@ -1322,7 +1323,10 @@ class DockerManager(object): try: containers = do_create(count, params) - except: + except docker.errors.APIError as e: + if e.response.status_code != 404: + raise + self.pull_image() containers = do_create(count, params) From efb6088c27c84a352df5ad92a60bbd1302017946 Mon Sep 17 00:00:00 2001 From: Maksim Losev Date: Mon, 27 Apr 2015 11:58:20 +0300 Subject: [PATCH 200/386] Use HostConfig object when creating container with Docker Remote API > 1.15 This is mlosev's patch (from #1208), rebased against devel as of 2790af2. It resolves #1707, which was caused by an API incompatibility between the docker module and server API version 1.19.
--- cloud/docker/docker.py | 62 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 131ac59e0ae..f2e2b50e9dc 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -530,6 +530,7 @@ class DockerManager(object): 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), 'log_driver': ((1, 2, 0), '1.18'), + 'host_config': ((0, 7, 0), '1.15'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -739,6 +740,52 @@ class DockerManager(object): else: return None + def get_start_params(self): + """ + Create start params + """ + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + return params + + def get_host_config(self): + """ + Create HostConfig object + """ + params = self.get_start_params() + return docker.utils.create_host_config(**params) + def 
get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. @@ -1292,16 +1339,10 @@ class DockerManager(object): return docker.utils.create_host_config(**params) def create_containers(self, count=1): - try: - mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, - 'mem_limit': mem_limit, 'environment': self.env, 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), @@ -1309,9 +1350,11 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), - 'host_config': self.create_host_config(), } + if self.ensure_capability('host_config', fail=False): + params['host_config'] = self.get_host_config() + def do_create(count, params): results = [] for _ in range(count): @@ -1333,6 +1376,11 @@ class DockerManager(object): return containers def start_containers(self, containers): + params = {} + + if not self.ensure_capability('host_config', fail=False): + params = self.get_start_params() + for i in containers: self.client.start(i) self.increment_counter('started') From 963eb242f166ced5fd7904c31a72fc9feb1b0613 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 16:52:56 -0400 Subject: [PATCH 201/386] updated to add missing 'use' option --- packaging/os/package.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packaging/os/package.py b/packaging/os/package.py index 7c94b98a941..288ca83a772 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -23,7 +23,10 @@ DOCUMENTATION = ''' --- module: package version_added: 2.0 -author: Ansible Core Team +author: + - Ansible Inc +maintainers: + - Ansible 
Core Team short_description: Generic OS package manager description: - Installs, upgrade and removes packages using the underlying OS package manager. @@ -36,6 +39,11 @@ options: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: true + use: + description: + - The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it. + required: false + default: auto requirements: - Whatever is required for the package plugins specific for each system. notes: From 6aac888c7da8df9d524f8c26f7594056e490aeb1 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 16 Jul 2015 15:04:55 -0400 Subject: [PATCH 202/386] Add new os_nova_flavor module. The os_nova_flavor module allows a user with administrative privileges to create and delete nova flavors. --- cloud/openstack/os_nova_flavor.py | 237 ++++++++++++++++++++++++++++++ 1 file changed, 237 insertions(+) create mode 100644 cloud/openstack/os_nova_flavor.py diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py new file mode 100644 index 00000000000..82b3a53aa3d --- /dev/null +++ b/cloud/openstack/os_nova_flavor.py @@ -0,0 +1,237 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_nova_flavor +short_description: Manage OpenStack compute flavors +extends_documentation_fragment: openstack +version_added: "2.0" +author: "David Shrewsbury (@Shrews)" +description: + - Add or remove flavors from OpenStack. +options: + state: + description: + - Indicate desired state of the resource. When I(state) is 'present', + then I(ram), I(vcpus), and I(disk) are all required. There are no + default values for those parameters. + choices: ['present', 'absent'] + required: false + default: present + name: + description: + - Flavor name. + required: true + ram: + description: + - Amount of memory, in MB. + required: false + default: null + vcpus: + description: + - Number of virtual CPUs. + required: false + default: null + disk: + description: + - Size of local disk, in GB. + required: false + default: null + ephemeral: + description: + - Ephemeral space size, in GB. + required: false + default: 0 + swap: + description: + - Swap space size, in MB. + required: false + default: 0 + rxtx_factor: + description: + - RX/TX factor. + required: false + default: 1.0 + is_public: + description: + - Make flavor accessible to the public. + required: false + default: true + flavorid: + description: + - ID for the flavor. This is optional as a unique UUID will be + assigned if a value is not specified. + required: false + default: "auto" +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of +# local disk, and 10GB of ephemeral. +- os_nova_flavor: + cloud=mycloud + state=present + name=tiny + ram=1024 + vcpus=1 + disk=10 + ephemeral=10 + +# Delete 'tiny' flavor +- os_nova_flavor: + cloud=mycloud + state=absent + name=tiny +''' + +RETURN = ''' +flavor: + description: Dictionary describing the flavor. 
+ returned: On success when I(state) is 'present' + type: dictionary + contains: + id: + description: Flavor ID. + returned: success + type: string + sample: "515256b8-7027-4d73-aa54-4e30a4a4a339" + name: + description: Flavor name. + returned: success + type: string + sample: "tiny" + disk: + description: Size of local disk, in GB. + returned: success + type: int + sample: 10 + ephemeral: + description: Ephemeral space size, in GB. + returned: success + type: int + sample: 10 + ram: + description: Amount of memory, in MB. + returned: success + type: int + sample: 1024 + swap: + description: Swap space size, in MB. + returned: success + type: int + sample: 100 + vcpus: + description: Number of virtual CPUs. + returned: success + type: int + sample: 2 + is_public: + description: Make flavor accessible to the public. + returned: success + type: bool + sample: true +''' + + +def _system_state_change(module, flavor): + state = module.params['state'] + if state == 'present' and not flavor: + return True + if state == 'absent' and flavor: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + state = dict(required=False, default='present', + choices=['absent', 'present']), + name = dict(required=False), + + # required when state is 'present' + ram = dict(required=False, type='int'), + vcpus = dict(required=False, type='int'), + disk = dict(required=False, type='int'), + + ephemeral = dict(required=False, default=0, type='int'), + swap = dict(required=False, default=0, type='int'), + rxtx_factor = dict(required=False, default=1.0, type='float'), + is_public = dict(required=False, default=True, type='bool'), + flavorid = dict(required=False, default="auto"), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['ram', 'vcpus', 'disk']) + ], + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for 
this module') + + state = module.params['state'] + name = module.params['name'] + + try: + cloud = shade.operator_cloud(**module.params) + flavor = cloud.get_flavor(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, flavor)) + + if state == 'present': + if not flavor: + flavor = cloud.create_flavor( + name=name, + ram=module.params['ram'], + vcpus=module.params['vcpus'], + disk=module.params['disk'], + flavorid=module.params['flavorid'], + ephemeral=module.params['ephemeral'], + swap=module.params['swap'], + rxtx_factor=module.params['rxtx_factor'], + is_public=module.params['is_public'] + ) + module.exit_json(changed=True, flavor=flavor) + module.exit_json(changed=False, flavor=flavor) + + elif state == 'absent': + if flavor: + cloud.delete_flavor(name) + module.exit_json(changed=True) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() From 24c2bccd6607edd7ee67e5cb83b8c1749b58a3bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 01:11:04 -0400 Subject: [PATCH 203/386] corrected version_added --- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index abde0ec375c..f95fbba00e2 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -67,7 +67,7 @@ options: required: false default: null aliases: [] - version_added: "x.x" + version_added: "1.9" tags: description: - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. 
From dc71c04827dd7729e31e931bf32612e6fbc9288a Mon Sep 17 00:00:00 2001 From: whiter Date: Fri, 17 Jul 2015 15:54:17 +1000 Subject: [PATCH 204/386] Added 'resource_tags' alias --- cloud/amazon/ec2_vpc_net.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index ebdd4ed6504..2ee730f59cb 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -59,6 +59,7 @@ options: - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different. default: None required: false + aliases: [ 'resource_tags' ] state: description: - The state of the VPC. Either absent or present. @@ -186,7 +187,7 @@ def main(): dns_support = dict(type='bool', default=True), dns_hostnames = dict(type='bool', default=True), dhcp_opts_id = dict(type='str', default=None, required=False), - tags = dict(type='dict', required=False, default=None), + tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']), state = dict(choices=['present', 'absent'], default='present'), multi_ok = dict(type='bool', default=False) ) From 3533f3953438be8e3b860a2cf514dbd192c26552 Mon Sep 17 00:00:00 2001 From: Eero Niemi Date: Fri, 17 Jul 2015 16:54:39 +0300 Subject: [PATCH 205/386] Fixed parameter validation when creating a volume from a snapshot --- cloud/amazon/ec2_vol.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 712be248af3..0d275cc91d7 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -436,11 +436,11 @@ def main(): # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size - if not volume_size and not (id or name): - module.fail_json(msg="You must specify an existing volume with id or name or a volume_size") + 
if not volume_size and not (id or name or snapshot): + module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") - if volume_size and id: - module.fail_json(msg="Cannot specify volume_size and id") + if volume_size and (id or snapshot): + module.fail_json(msg="Cannot specify volume_size together with id or snapshot") if state == 'absent': delete_volume(module, ec2) From 048cfb857dd7a8c4a55d373e172c2fb47eea2135 Mon Sep 17 00:00:00 2001 From: Herby Gillot Date: Fri, 17 Jul 2015 21:09:34 -0400 Subject: [PATCH 206/386] rds: add the ability to reboot RDS instances --- cloud/amazon/rds.py | 60 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 3d6f192b9ab..4bfb7e666b0 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -28,7 +28,7 @@ options: required: true default: null aliases: [] - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] + choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot @@ -213,6 +213,13 @@ options: default: no choices: [ "yes", "no" ] aliases: [] + force_failover: + description: + - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] new_instance_name: description: - Name to rename an instance to. Used only when command=modify. 
@@ -292,6 +299,13 @@ EXAMPLES = ''' instance_name: new-database new_instance_name: renamed-database wait: yes + +# Reboot an instance and wait for it to become available again +- rds + command: reboot + instance_name: database + wait: yes + ''' import sys @@ -380,6 +394,13 @@ class RDSConnection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_dbinstance(instance_name) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) @@ -464,6 +485,13 @@ class RDS2Connection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] @@ -847,6 +875,31 @@ def snapshot_db_instance(module, conn): module.exit_json(changed=changed, snapshot=resource.get_data()) +def reboot_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = [] + + if has_rds2: + valid_vars.append('force_failover') + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + result = conn.get_db_instance(instance_name) + changed = False + try: + result = 
conn.reboot_db_instance(instance_name, **params) + changed = True + except RDSException, e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + def restore_db_instance(module, conn): required_vars = ['instance_name', 'snapshot'] valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', @@ -918,6 +971,7 @@ def validate_parameters(required_vars, valid_vars, module): 'instance_type': 'db_instance_class', 'password': 'master_user_password', 'new_instance_name': 'new_db_instance_identifier', + 'force_failover': 'force_failover', } if has_rds2: optional_params.update(optional_params_rds2) @@ -960,7 +1014,7 @@ def validate_parameters(required_vars, valid_vars, module): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), + command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name = dict(required=False), source_instance = dict(required=False), db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), @@ -992,6 +1046,7 @@ def main(): tags = dict(type='dict', required=False), publicly_accessible = dict(required=False), character_set_name = dict(required=False), + force_failover = dict(type='bool', required=False, default=False) ) ) @@ -1010,6 +1065,7 @@ def main(): 'modify': modify_db_instance, 'promote': promote_db_instance, 'snapshot': snapshot_db_instance, + 'reboot': reboot_db_instance, 'restore': restore_db_instance, } From db199991856053c8ee3e1a201aed25d27119e0d0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: 
Fri, 17 Jul 2015 23:13:21 -0400 Subject: [PATCH 207/386] minor doc fixes, version added for latest feature --- cloud/amazon/rds.py | 54 +++++++++------------------------------------ 1 file changed, 10 insertions(+), 44 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 4bfb7e666b0..9e98f50230b 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -24,147 +24,123 @@ description: options: command: description: - - Specifies the action to take. + - Specifies the action to take. required: true - default: null - aliases: [] choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot required: false default: null - aliases: [] source_instance: description: - Name of the database to replicate. Used only when command=replicate. required: false default: null - aliases: [] db_engine: description: - - The type of database. Used only when command=create. + - The type of database. Used only when command=create. required: false default: null - aliases: [] choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] size: description: - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. required: false default: null - aliases: [] instance_type: description: - - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. + - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. 
required: false default: null - aliases: [] username: description: - Master database username. Used only when command=create. required: false default: null - aliases: [] password: description: - Password for the master database username. Used only when command=create or command=modify. required: false default: null - aliases: [] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true - default: null aliases: [ 'aws_region', 'ec2_region' ] db_name: description: - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. required: false default: null - aliases: [] engine_version: description: - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. required: false default: null - aliases: [] parameter_group: description: - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. required: false default: null - aliases: [] license_model: description: - - The license model for this DB instance. Used only when command=create or command=restore. + - The license model for this DB instance. Used only when command=create or command=restore. required: false default: null - aliases: [] choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] multi_zone: description: - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - choices: [ "yes", "no" ] + choices: [ "yes", "no" ] required: false default: null - aliases: [] iops: description: - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. 
Must be an integer greater than 1000. required: false default: null - aliases: [] security_groups: description: - Comma separated list of one or more security groups. Used only when command=create or command=modify. required: false default: null - aliases: [] vpc_security_groups: description: - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. required: false default: null - aliases: [] port: description: - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. required: false default: null - aliases: [] upgrade: description: - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. required: false default: no choices: [ "yes", "no" ] - aliases: [] option_group: description: - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. required: false default: null - aliases: [] maint_window: description: - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." required: false default: null - aliases: [] backup_window: description: - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. required: false default: null - aliases: [] backup_retention: description: - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." required: false default: null - aliases: [] zone: description: - availability zone in which to launch the instance. 
Used only when command=create, command=replicate or command=restore. @@ -176,18 +152,15 @@ options: - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. required: false default: null - aliases: [] snapshot: description: - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null - aliases: [] aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false - default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: @@ -201,53 +174,46 @@ options: required: false default: "no" choices: [ "yes", "no" ] - aliases: [] wait_timeout: description: - how long before wait gives up, in seconds default: 300 - aliases: [] apply_immediately: description: - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. default: no choices: [ "yes", "no" ] - aliases: [] force_failover: description: - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. required: false default: "no" choices: [ "yes", "no" ] - aliases: [] + version_added: "2.0" new_instance_name: description: - Name to rename an instance to. Used only when command=modify. required: false default: null - aliases: [] - version_added: 1.5 + version_added: "1.5" character_set_name: description: - Associate the DB instance with a specified character set. Used with command=create. required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" publicly_accessible: description: - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. 
Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" tags: description: - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" requirements: - "python >= 2.6" - "boto" From e04f75d8721d67175d00259b364ee8596222ef4d Mon Sep 17 00:00:00 2001 From: bambou Date: Sat, 18 Jul 2015 17:53:02 +0200 Subject: [PATCH 208/386] Check if the gid is set --- system/group.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/system/group.py b/system/group.py index d97dd2176ac..ab542d9bc47 100644 --- a/system/group.py +++ b/system/group.py @@ -273,7 +273,8 @@ class DarwinGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'create' ] - cmd += [ '-i', self.gid ] + if self.gid is not None: + cmd += [ '-i', self.gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) @@ -285,12 +286,13 @@ class DarwinGroup(Group): (rc, out, err) = self.execute_command(cmd) return (rc, out, err) - def group_mod(self): + def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'edit' ] - cmd += [ '-i', self.gid ] + if gid is not None: + cmd += [ '-i', gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) From 718fd1f891fed028ac8d917817cafaf7817abf1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 18 Jul 2015 23:28:41 -0400 Subject: [PATCH 209/386] prevent usless assignment of home --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 33a3ba24d37..1045df70e67 100644 --- a/system/user.py +++ b/system/user.py @@ -253,7 +253,6 @@ class 
User(object): self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] - self.home = module.params['home'] self.shell = module.params['shell'] self.password = module.params['password'] self.force = module.params['force'] @@ -269,6 +268,7 @@ class User(object): self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] + self.home = None self.expires = None if module.params['home'] is not None: From 3849a6d87b214838c5906d9202eb9c48c75fdae5 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sun, 19 Jul 2015 12:45:31 +0200 Subject: [PATCH 210/386] synchronize: add flag for verifying target host. Add the possibility to verify the target host using a "verify_host" flag. It is disabled by default to not change the module behaviour. --- files/synchronize.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/files/synchronize.py b/files/synchronize.py index abad5ad359f..ff58f9c1032 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -158,6 +158,12 @@ options: default: no required: false version_added: "2.0" + verify_host: + description: + - Verify destination host key. + default: no + required: false + version_added: "2.0" notes: - rsync must be installed on both the local and remote machine. 
- Inspect the verbose output to validate the destination user/host/path @@ -244,6 +250,7 @@ def main(): rsync_opts = dict(type='list'), ssh_args = dict(type='str'), partial = dict(default='no', type='bool'), + verify_host = dict(default='no', type='bool'), ), supports_check_mode = True ) @@ -272,6 +279,7 @@ def main(): group = module.params['group'] rsync_opts = module.params['rsync_opts'] ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] cmd = '%s --delay-updates -F' % rsync if compress: @@ -324,10 +332,13 @@ def main(): else: private_key = '-i '+ private_key + ssh_opts = '-S none' + + if not verify_host: + ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts + if ssh_args: - ssh_opts = '-S none -o StrictHostKeyChecking=no %s' % ssh_args - else: - ssh_opts = '-S none -o StrictHostKeyChecking=no' + ssh_opts = '%s %s' % (ssh_opts, ssh_args) if dest_port != 22: cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) From ca80b92233c5eed5f08663d16d9c4bd7600c8e48 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 19 Jul 2015 17:42:12 -0400 Subject: [PATCH 211/386] added version_Added to get_url's force_basic_auth --- network/basics/get_url.py | 1 + 1 file changed, 1 insertion(+) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 9ab039ebb4b..66fc71b78da 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -111,6 +111,7 @@ options: required: false version_added: '1.6' force_basic_auth: + version_added: '2.0' description: - httplib2, the library used by the uri module only sends authentication information when a webservice responds to an initial request with a 401 status. Since some basic auth services do not properly From fca75a9705e8c2d698d65f86337cdfc4ea996521 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 24 Nov 2014 22:55:14 +0100 Subject: [PATCH 212/386] Add support for cpusets. 
Requires docker-py >= 0.6.0 --- cloud/docker/docker.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 5e78ec98969..bde2f7dafe5 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -273,8 +273,14 @@ options: docker-py >= 0.5.0. default: false version_added: "1.9" + cpuset: + description: + - CPUs in which to allow execution. Requires docker-py >= 0.6.0. + required: false + default: null + version_added: "1.8" -author: +author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" - "Pavel Antonov (@softzilla)" @@ -1583,6 +1589,7 @@ def main(): pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), + cpuset = dict(default=None), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From a8bc50a11f5801fa71ccb695867d9944ea294db5 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 24 Nov 2014 23:08:12 +0100 Subject: [PATCH 213/386] Renamed to cpu_set --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index bde2f7dafe5..b30046ae157 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -273,7 +273,7 @@ options: docker-py >= 0.5.0. default: false version_added: "1.9" - cpuset: + cpu_set: description: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. 
required: false @@ -1589,7 +1589,7 @@ def main(): pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), - cpuset = dict(default=None), + cpu_set = dict(default=None), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From a702dbd29ac266837da9a0a25d2c6b51d3a87c44 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 29 Jun 2015 22:15:25 +0200 Subject: [PATCH 214/386] Switch to _cap_ver_req and add cpu_set to create_containers --- cloud/docker/docker.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b30046ae157..fcd2a3453d0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -537,6 +537,7 @@ class DockerManager(object): 'pid': ((1, 0, 0), '1.17'), 'log_driver': ((1, 2, 0), '1.18'), 'host_config': ((0, 7, 0), '1.15'), + 'cpu_set': ((0, 6, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -559,7 +560,7 @@ class DockerManager(object): elif 2 <= len(parts) <= 3: # default to read-write ro = False - # with supplied bind mode + # with supplied bind mode if len(parts) == 3: if parts[2] not in ['ro', 'rw']: self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') @@ -1356,6 +1357,8 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'cpuset': self.module.params.get('cpu_set'), + 'host_config': self.create_host_config(), } if self.ensure_capability('host_config', fail=False): From d4d78a1998cc83e2aa232bb7922fc7c8c514e0e7 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Tue, 30 Jun 2015 17:48:53 +0200 Subject: [PATCH 215/386] Too late for 1.8 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index fcd2a3453d0..62c637a5e04 100644 
--- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -278,7 +278,7 @@ options: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. required: false default: null - version_added: "1.8" + version_added: "1.9" author: - "Cove Schneider (@cove)" From 01f8a99509c0715579c6085c518582d3fef6a941 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 20 Jul 2015 13:38:37 -0700 Subject: [PATCH 216/386] Deprecated _ec2_ami_search now verifies SSL certificates --- cloud/amazon/_ec2_ami_search.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 65953af2b5d..4fac97e9471 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -65,6 +65,15 @@ options: required: false default: paravirtual choices: ["paravirtual", "hvm"] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + be set to C(no) used on personally controlled sites using self-signed + certificates. Prior to 1.9.3 the code defaulted to C(no). 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: '1.9.3' author: Lorin Hochstein ''' @@ -102,11 +111,12 @@ AWS_REGIONS = ['ap-northeast-1', def get_url(module, url): """ Get url and return response """ - try: - r = urllib2.urlopen(url) - except (urllib2.HTTPError, urllib2.URLError), e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) + + r, info = fetch_url(module, url) + if info['status'] != 200: + # Backwards compat + info['status_code'] = info['status'] + module.fail_json(**info) return r @@ -182,7 +192,8 @@ def main(): choices=['i386', 'amd64']), region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', - choices=['paravirtual', 'hvm']) + choices=['paravirtual', 'hvm']), + validate_certs = dict(required=False, default=True, type='bool'), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] @@ -196,6 +207,7 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * +from ansible.module_utils.urls import * if __name__ == '__main__': main() From 3f7313cc431229cde0e250b8c5f4f1b592d223a4 Mon Sep 17 00:00:00 2001 From: ayush Date: Mon, 20 Jul 2015 17:05:10 -0700 Subject: [PATCH 217/386] Updated doc strings so each character isn't considered a line --- cloud/amazon/s3.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7b6990e25e3..072c8bc40d4 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -35,7 +35,8 @@ options: default: null aliases: ['ec2_secret_key', 'secret_key'] bucket: - description: Bucket name. + description: + - Bucket name. required: true default: null aliases: [] @@ -118,11 +119,13 @@ options: default: 0 version_added: "2.0" s3_url: - description: S3 URL endpoint for usage with Eucalypus, fakes3, etc. 
Otherwise assumes AWS + description: + - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS default: null aliases: [ S3_URL ] src: - description: The source file path when performing a PUT operation. + description: + - The source file path when performing a PUT operation. required: false default: null aliases: [] From 24419284840cd7022a3083219c67570be07dc67e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:09:36 -0400 Subject: [PATCH 218/386] corrected v ersion added --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 62c637a5e04..1c4e6e8cd4e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -278,7 +278,7 @@ options: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. required: false default: null - version_added: "1.9" + version_added: "2.0" author: - "Cove Schneider (@cove)" From 1dafa427c3d0f299da2382c5b4a92d52805a5faa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:52:43 -0400 Subject: [PATCH 219/386] added versionadded to new option in pip module --- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index abfb9385152..ec0bf93979c 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -71,7 +71,7 @@ options: required: false default: virtualenv virtualenv_python: - version_added: "FIXME" + version_added: "2.0" description: - The Python executable used for creating the virtual environment. For example C(python3.4), C(python2.7). 
When not specified, the From 030d6d645c61c9586b38c9b507bb2bb2a1b7efe4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:53:59 -0400 Subject: [PATCH 220/386] added version_Added for new signal option in docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 59814c30e01..96254a10654 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -195,6 +195,7 @@ options: disabled, fail unless the process exits cleanly. default: true signal: + version_added: "2.0" description: - With the state "killed", you can alter the signal sent to the container. From 88167a5daca39304bbb0a20cd62a2c26e72fe5f3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 21:18:49 -0400 Subject: [PATCH 221/386] minor doc fixes to docker_user --- cloud/docker/docker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index abb4e764aae..e77951abf49 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -164,8 +164,7 @@ options: description: - Username or UID to use within the container required: false - default: - aliases: [] + default: null version_added: "2.0" username: description: From 55bc9e8fb1c8e1a50418d8471a72f049e74fd06d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 22:37:02 -0400 Subject: [PATCH 222/386] added rickmendes as maintainer --- cloud/amazon/ec2_elb_lb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index f2a04863923..3d54f994436 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -22,7 +22,9 @@ description: - Will be marked changed when called only if state is changed. short_description: Creates or destroys Amazon ELB. 
version_added: "1.5" -author: "Jim Dalton (@jsdalton)" +author: + - "Jim Dalton (@jsdalton)" + - "Rick Mendes (@rickmendes)" options: state: description: From 38f01b52e83f8a91c9e036a096d4413f0a39386d Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Tue, 21 Jul 2015 14:24:07 +0200 Subject: [PATCH 223/386] Added support for --read-only Docker containers Adds support for mounting the container's root filesystem as read only. --- cloud/docker/docker.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..6308bd94efe 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -265,6 +265,12 @@ options: default: DockerHub aliases: [] version_added: "1.8" + read_only: + description: + - Mount the container's root filesystem as read only + default: false + aliases: [] + version_added: "1.9" restart_policy: description: - Container restart policy. @@ -772,6 +778,7 @@ class DockerManager(object): 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), + 'read_only': self.module.params.get('read_only'), } optionals = {} @@ -1609,6 +1616,7 @@ def main(): insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), cpu_set = dict(default=None), + read_only = dict(default=False, type='bool'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 6aedc1020e69d5f218b81462cfc64256e27952c9 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 21 Jul 2015 11:52:48 -0400 Subject: [PATCH 224/386] restore mem_limit mem_limit got lost in the #1744; this restores it. Thanks to @dgromov for the report. 
--- cloud/docker/docker.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..623cbbedc63 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1359,10 +1359,16 @@ class DockerManager(object): return docker.utils.create_host_config(**params) def create_containers(self, count=1): + try: + mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, + 'mem_limit': mem_limit, 'environment': self.env, 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), From b0b1566b8934988c5ba725f34a8744c2092c765a Mon Sep 17 00:00:00 2001 From: Micheal Waltz Date: Tue, 21 Jul 2015 12:58:58 -0700 Subject: [PATCH 225/386] Use proper HostConfig element which contians the proper Memory value - fixes #1766 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..f8532659ecf 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1008,7 +1008,7 @@ class DockerManager(object): except ValueError as e: self.module.fail_json(msg=str(e)) - actual_mem = container['Config']['Memory'] + actual_mem = container['HostConfig']['Memory'] if expected_mem and actual_mem != expected_mem: self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) From 51666c6defe04dcb976850be515f47a35a0305dd Mon Sep 17 00:00:00 2001 From: Yuhui Huang Date: Tue, 21 Jul 2015 16:07:25 -0700 Subject: [PATCH 226/386] Checking pip uninstall output in both stdout and stderr --- packaging/language/pip.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 
ec0bf93979c..8bbae35038d 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -155,7 +155,7 @@ def _get_cmd_options(module, cmd): words = stdout.strip().split() cmd_options = [ x for x in words if x.startswith('--') ] return cmd_options - + def _get_full_name(name, version=None): if version is None: @@ -356,7 +356,8 @@ def main(): rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) out += out_pip err += err_pip - if rc == 1 and state == 'absent' and 'not installed' in out_pip: + if rc == 1 and state == 'absent' and \ + ('not installed' in out_pip or 'not installed' in err_pip): pass # rc is 1 when attempting to uninstall non-installed package elif rc != 0: _fail(module, cmd, out, err) From b96d304b93b8768f3427cc5495a66fb56e09453c Mon Sep 17 00:00:00 2001 From: Christian Hammerl Date: Sat, 18 Oct 2014 15:25:07 +0200 Subject: [PATCH 227/386] docker: add support to add/drop capabilities --- cloud/docker/docker.py | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..6cf7ed1d51e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -292,7 +292,19 @@ options: required: false default: null version_added: "2.0" - + cap_add: + description: + - Add capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + version_added: "2.0" + cap_drop: + description: + - Drop capabilities for the container. Requires docker-py >= 0.5.0. 
+ required: false + default: false + aliases: [] + version_added: "2.0" author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" @@ -551,6 +563,8 @@ class DockerManager(object): 'log_driver': ((1, 2, 0), '1.18'), 'host_config': ((0, 7, 0), '1.15'), 'cpu_set': ((0, 6, 0), '1.14'), + 'cap_add': ((0, 5, 0), '1.14'), + 'cap_drop': ((0, 5, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1321,7 +1335,8 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -1356,6 +1371,14 @@ class DockerManager(object): log_config.type = optionals['log_driver'] params['log_config'] = log_config + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + return docker.utils.create_host_config(**params) def create_containers(self, count=1): @@ -1609,6 +1632,8 @@ def main(): insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), cpu_set = dict(default=None), + cap_add = dict(default=None, type='list'), + cap_drop = dict(default=None, type='list'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From fd6518179b32c53103fe0c4b1dbbeb4486ed7532 Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Tue, 2 Jun 2015 14:07:30 -0700 Subject: [PATCH 228/386] Move validate command into doc fragment and fix a few typos --- files/copy.py | 11 ++--------- files/lineinfile.py | 11 +---------- files/replace.py | 7 +------ files/template.py | 10 +--------- 4 files changed, 5 insertions(+), 34 
deletions(-) diff --git a/files/copy.py b/files/copy.py index b7f333cead6..ad56800764b 100644 --- a/files/copy.py +++ b/files/copy.py @@ -63,21 +63,13 @@ options: force: description: - the default is C(yes), which will replace the remote file when contents - are different than the source. If C(no), the file will only be transferred + are different than the source. If C(no), the file will only be transferred if the destination does not exist. version_added: "1.1" required: false choices: [ "yes", "no" ] default: "yes" aliases: [ "thirsty" ] - validate: - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the visudo example below. - The command is passed securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" directory_mode: description: - When doing a recursive copy set the mode for the directories. If this is not set we will use the system @@ -86,6 +78,7 @@ options: required: false version_added: "1.5" extends_documentation_fragment: files +extends_documentation_fragment: validate author: - "Ansible Core Team" - "Michael DeHaan" diff --git a/files/lineinfile.py b/files/lineinfile.py index 6bcfb3b3060..777f0a498a9 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -31,6 +31,7 @@ author: - "Daniel Hokka Zakrissoni (@dhozac)" - "Ahti Kitsik (@ahtik)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: @@ -116,16 +117,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place. 
- Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like - expansion and pipes won't work. - required: false - default: None - version_added: "1.4" others: description: - All arguments accepted by the M(file) module also work here. diff --git a/files/replace.py b/files/replace.py index fa0142823ea..dea2c32a54f 100644 --- a/files/replace.py +++ b/files/replace.py @@ -27,6 +27,7 @@ DOCUMENTATION = """ module: replace author: "Evan Kaufman (@EvanK)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. description: @@ -61,12 +62,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place - required: false - default: None others: description: - All arguments accepted by the M(file) module also work here. diff --git a/files/template.py b/files/template.py index a1dc72c27bd..120917f49c2 100644 --- a/files/template.py +++ b/files/template.py @@ -38,15 +38,6 @@ options: required: false choices: [ "yes", "no" ] default: "no" - validate: - description: - - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. - - validation to run before copying into place. The command is passed - securely so shell features like expansion and pipes won't work. 
- required: false - default: "" - version_added: "1.2" force: description: - the default is C(yes), which will replace the remote file when contents @@ -62,6 +53,7 @@ author: - Ansible Core Team - Michael DeHaan extends_documentation_fragment: files +extends_documentation_fragment: validate ''' EXAMPLES = ''' From 2a0f6c1cb33cf2cab5303ca1f26852011c7e2a80 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 22 Jul 2015 12:05:21 +1000 Subject: [PATCH 229/386] Correct handling of empty role_attr_flags role_attr_flags is the empty string by default, not None. --- database/postgresql/postgresql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index d3f6d81c360..353d3ac3d93 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -92,7 +92,7 @@ options: description: - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER" required: false - default: null + default: "" choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] state: @@ -233,7 +233,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir return False # Handle passwords. - if not no_password_changes and (password is not None or role_attr_flags is not None): + if not no_password_changes and (password is not None or role_attr_flags != ''): # Select password and all flag-like columns in order to verify changes. query_password_data = dict(password=password, expires=expires) select = "SELECT * FROM pg_authid where rolname=%(user)s" From 47cb92f74f17c31c72a535e3ae153c328f85be53 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 22 Jul 2015 13:34:52 +1000 Subject: [PATCH 230/386] Ensure TEMP privilege gets removed when expanding ALL. 
ALL gets expanded to the list of VALID_PRIVS which includes TEMPORARY and TEMP The code that replaced TEMP with TEMPORARY didn't work with the expansion --- database/postgresql/postgresql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 353d3ac3d93..cee5a9ae131 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -490,10 +490,10 @@ def parse_role_attrs(role_attr_flags): def normalize_privileges(privs, type_): new_privs = set(privs) - if 'ALL' in privs: + if 'ALL' in new_privs: new_privs.update(VALID_PRIVS[type_]) new_privs.remove('ALL') - if 'TEMP' in privs: + if 'TEMP' in new_privs: new_privs.add('TEMPORARY') new_privs.remove('TEMP') From dc9dfa9ef8e21eb7fc037c0f8c54d510a77e1beb Mon Sep 17 00:00:00 2001 From: Baraa Basata Date: Wed, 22 Jul 2015 00:03:33 -0400 Subject: [PATCH 231/386] Fix iam_policy example --- cloud/amazon/iam_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 5026169e104..9213d1585b0 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -120,7 +120,7 @@ tasks: iam_policy: iam_type: user iam_name: "{{ item.user }}" - policy_name: "s3_limited_access_{{ item.s3_user_prefix }}" + policy_name: "s3_limited_access_{{ item.prefix }}" state: present policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " with_items: From 851e55b55fb916c35cbec7779da116281b279e4a Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Wed, 22 Jul 2015 08:28:16 +0200 Subject: [PATCH 232/386] changed version_added for read_only param this feature will be released in 2.0 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1bd49b0f66d..919ff62ba1d 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ 
-270,7 +270,7 @@ options: - Mount the container's root filesystem as read only default: false aliases: [] - version_added: "1.9" + version_added: "2.0" restart_policy: description: - Container restart policy. From 313381981e14aa1d92ff41de362b28c127fd1fe6 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 22 Jul 2015 03:14:20 -0400 Subject: [PATCH 233/386] Save user after creating before trying to read/set other properties. Fixes #1241 --- windows/win_user.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index ae4847a8528..b7be7e4eea3 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -146,6 +146,7 @@ If ($state -eq 'present') { If ($password -ne $null) { $user_obj.SetPassword($password) } + $user_obj.SetInfo() $result.changed = $true } ElseIf (($password -ne $null) -and ($update_password -eq 'always')) { From 2133cb11713b1f4f945b0b0fc032a0b17d752aa8 Mon Sep 17 00:00:00 2001 From: Artur Cygan Date: Wed, 22 Jul 2015 11:36:32 +0200 Subject: [PATCH 234/386] Update route53.py Fix typos --- cloud/amazon/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index c659843b9a3..aca01193555 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -213,7 +213,7 @@ EXAMPLES = ''' - route53: command: "create" zone: "foo.com" - hostes_zone_id: "Z2AABBCCDDEEFF" + hosted_zone_id: "Z2AABBCCDDEEFF" record: "localhost.foo.com" type: "AAAA" ttl: "7200" @@ -224,7 +224,7 @@ EXAMPLES = ''' - route53: command: "create" zone: "foo.com" - hostes_zone_id: "Z2AABBCCDDEEFF" + hosted_zone_id: "Z2AABBCCDDEEFF" record: "localhost.foo.com" type: "AAAA" ttl: "7200" From 222927256d14b8e530853f0a2c7cb878a3e991fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Jul 2015 07:08:37 -0700 Subject: [PATCH 235/386] Remove validate_certs as the url is not user settable so we always want to validate the certificate --- 
cloud/amazon/_ec2_ami_search.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 4fac97e9471..ec9da6d4988 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -65,15 +65,6 @@ options: required: false default: paravirtual choices: ["paravirtual", "hvm"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only - be set to C(no) used on personally controlled sites using self-signed - certificates. Prior to 1.9.3 the code defaulted to C(no). - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: '1.9.3' author: Lorin Hochstein ''' @@ -193,7 +184,6 @@ def main(): region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', choices=['paravirtual', 'hvm']), - validate_certs = dict(required=False, default=True, type='bool'), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] From 305970569d542bf26779d90117c2e7d580ea5bdf Mon Sep 17 00:00:00 2001 From: Igor Khomyakov Date: Wed, 22 Jul 2015 19:19:00 +0300 Subject: [PATCH 236/386] fixed 'present' --- web_infrastructure/supervisorctl.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index 43fa95467fb..9bc4c0b8afa 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -75,8 +75,8 @@ notes: - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). 
requirements: [ "supervisorctl" ] -author: - - "Matt Wright (@mattupstate)" +author: + - "Matt Wright (@mattupstate)" - "Aaron Wang (@inetfuture) " ''' @@ -194,14 +194,12 @@ def main(): if state == 'restarted': rc, out, err = run_supervisorctl('update', check_rc=True) processes = get_matched_processes() - if not processes: + if len(processes) == 0: module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: True, 'restart', 'started') processes = get_matched_processes() - if not processes: - module.fail_json(name=name, msg="ERROR (no such process)") if state == 'absent': if len(processes) == 0: @@ -230,9 +228,13 @@ def main(): module.fail_json(msg=out, name=name, state=state) if state == 'started': + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') if state == 'stopped': + if len(processes) == 0: + module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') # import module snippets From 99e3881a7c3b21bab079582f73f1feda8c1630d0 Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Wed, 22 Jul 2015 20:46:42 +0000 Subject: [PATCH 237/386] iam: don't delete passwords by default --- cloud/amazon/iam.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index bda953faab4..aa5777ba3b3 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -280,12 +280,6 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key module.fail_json(changed=False, msg="Passsword doesn't conform to policy") else: module.fail_json(msg=error_msg) - else: - try: - iam.delete_login_profile(name) - changed = True - except boto.exception.BotoServerError: - pass if key_state == 'create': try: From e13e369aaed1973054e9f7ea15c16782db83a7c8 Mon Sep 17 00:00:00 
2001 From: Guilherme Carlos Date: Wed, 22 Jul 2015 17:55:35 -0300 Subject: [PATCH 238/386] Fix login_user on mysql_user `login_username` was provided but `login_user` is the correct option --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 0ff290f1185..36c400553ca 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -109,7 +109,7 @@ options: notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this is as easy as apt-get install python-mysqldb. - - Both C(login_password) and C(login_username) are required when you are + - Both C(login_password) and C(login_user) are required when you are passing credentials. If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of 'root' with no password. From e5e0a70fc1e7a4b3f1a89be376bdd1178ff3c988 Mon Sep 17 00:00:00 2001 From: queridiculo Date: Wed, 1 Jul 2015 17:14:55 -0400 Subject: [PATCH 239/386] yum: improved check_mode handling and package update flow. 
--- packaging/os/yum.py | 167 ++++++++++++++++++++++++++++++-------------- 1 file changed, 114 insertions(+), 53 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 14339b4c18b..cf321b31d13 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -212,7 +212,7 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di for rid in en_repos: my.repos.enableRepo(rid) - e,m,u = my.rpmdb.matchPackageNames([pkgspec]) + e, m, u = my.rpmdb.matchPackageNames([pkgspec]) pkgs = e + m if not pkgs: pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) @@ -224,16 +224,16 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di else: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) + rc, out, err = module.run_command(cmd) if not is_pkg: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] - rc2,out2,err2 = module.run_command(cmd) + rc2, out2, err2 = module.run_command(cmd) else: - rc2,out2,err2 = (0, '', '') + rc2, out2, err2 = (0, '', '') if rc == 0 and rc2 == 0: out += out2 - return [ p for p in out.split('\n') if p.strip() ] + return [p for p in out.split('\n') if p.strip()] else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) @@ -541,7 +541,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) #groups :( - elif spec.startswith('@'): + elif spec.startswith('@'): # complete wild ass guess b/c it's a group pkg = spec @@ -608,7 +608,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): shutil.rmtree(tempdir) except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) - module.exit_json(changed=True) + + for p in pkgs: + # take note of which packages are getting installed + res['results'].append('%s will 
be installed' % p) + module.exit_json(changed=True, results=res['results']) changed = True @@ -676,7 +680,10 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: - module.exit_json(changed=True) + # take note of which packages are getting removed + for p in pkgs: + res['results'].append('%s will be removed' % p) + module.exit_json(changed=True, results=res['results']) rc, out, err = module.run_command(cmd) @@ -711,47 +718,69 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] = '' res['changed'] = False res['rc'] = 0 + pkgs = {} + pkgs['update'] = [] + pkgs['install'] = [] + updates = {} + update_all = False + cmd = None - for spec in items: + # determine if we're doing an update all + if '*' in items: + update_all = True - pkg = None - basecmd = 'update' - cmd = '' - # groups, again - if spec.startswith('@'): - pkg = spec - - elif spec == '*': #update all - # use check-update to see if there is any need - rc,out,err = module.run_command(yum_basecmd + ['check-update']) - if rc == 100: - cmd = yum_basecmd + [basecmd] - else: - res['results'].append('All packages up to date') + # run check-update to see if we have packages pending + rc, out, err = module.run_command(yum_basecmd + ['check-update']) + if rc == 0 and update_all: + res['results'].append('Nothing to do here, all packages are up to date') + return res + elif rc == 100: + available_updates = out.split('\n') + # build update dictionary + for line in available_updates: + line = line.split() + # ignore irrelevant lines + # FIXME... revisit for something less kludgy + if '*' in line or len(line) != 3 or '.' 
not in line[0]: continue - - # dep/pkgname - find it - else: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - basecmd = 'update' else: - basecmd = 'install' + pkg, version, repo = line + name, dist = pkg.split('.') + updates.update({name: {'version': version, 'dist': dist, 'repo': repo}}) + elif rc == 1: + res['msg'] = err + res['rc'] = rc + module.fail_json(**res) + if update_all: + cmd = yum_basecmd + ['update'] + else: + for spec in items: + # some guess work involved with groups. update @ will install the group if missing + if spec.startswith('@'): + pkgs['update'].append(spec) + continue + # dep/pkgname - find it + else: + if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): + pkgs['update'].append(spec) + else: + pkgs['install'].append(spec) pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) + # FIXME..? may not be desirable to throw an exception here if a single package is missing if not pkglist: res['msg'] += "No Package matching '%s' found available, installed or updated" % spec module.fail_json(**res) - + nothing_to_do = True for this in pkglist: - if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): + if spec in pkgs['install'] and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break - - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - + + if spec in pkgs['update'] and spec in updates.keys(): + nothing_to_do = False + if nothing_to_do: res['results'].append("All packages providing %s are up to date" % spec) continue @@ -763,27 +792,60 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) 
module.fail_json(**res) - pkg = spec - if not cmd: - cmd = yum_basecmd + [basecmd, pkg] + # list of package updates + if update_all: + will_update = updates.keys() + else: + will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')] - if module.check_mode: - return module.exit_json(changed=True) + # check_mode output + if module.check_mode: + for w in will_update: + if w.startswith('@'): + msg = '%s will be updated' % w + else: + msg = '%s will be updated with %s-%s.%s from %s' % (w, w, updates[w]['version'], updates[w]['dist'], updates[w]['repo']) + res['results'].append(msg) - rc, out, err = module.run_command(cmd) + for p in pkgs['install']: + res['results'].append('%s will be installed' % p) - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME if it is - update it and check to see if it applied - # check to see if there is no longer an update available for the pkgspec - - if rc: - res['failed'] = True - else: + if len(will_update) > 0 or len(pkgs['install']) > 0: res['changed'] = True + return res + + # run commands + if cmd: # update all + rc, out, err = module.run_command(cmd) + res['changed'] = True + else: + if len(pkgs['install']) > 0: # install missing + cmd = yum_basecmd + ['install'] + pkgs['install'] + rc, out, err = module.run_command(cmd) + res['changed'] = True + else: + rc, out, err = [0, '', ''] + + if len(will_update) > 0: # update present + cmd = yum_basecmd + ['update'] + pkgs['update'] + rc2, out2, err2 = module.run_command(cmd) + res['changed'] = True + else: + rc2, out2, err2 = [0, '', ''] + + if not update_all: + rc += rc2 + out += out2 + err += err2 + + res['rc'] += rc + res['msg'] += err + res['results'].append(out) + + if rc: + res['failed'] = True + return res def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, @@ -927,4 +989,3 @@ from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': main() - From 
1ea03e71825a652a8b83ab6ab933df9994e60c22 Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Thu, 23 Jul 2015 16:45:01 +0200 Subject: [PATCH 240/386] ensures API compatibility for read_only containers --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 919ff62ba1d..035766fac77 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -571,6 +571,7 @@ class DockerManager(object): 'cpu_set': ((0, 6, 0), '1.14'), 'cap_add': ((0, 5, 0), '1.14'), 'cap_drop': ((0, 5, 0), '1.14'), + 'read_only': ((1, 0, 0), '1.17'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } From 8ffe34dcf1ac663a00237048d62b2f04c029ded5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 23 Jul 2015 14:23:00 -0400 Subject: [PATCH 241/386] Add new policy guidelines for Core --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e441a4e3527..ea9c4ced04e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,6 +22,10 @@ I'd also read the community page above, but in particular, make sure you copy [t Also please make sure you are testing on the latest released version of Ansible or the development branch. +If you'd like to contribute code to an existing module +====================================================== +Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. + Thanks! From 77d1c896fea212f0421c7cae6f2e56e9f7befc82 Mon Sep 17 00:00:00 2001 From: Micheal Waltz Date: Thu, 23 Jul 2015 11:34:33 -0700 Subject: [PATCH 242/386] Set the API version when checking differences in containers and use this to determine the location of the Memory value depending on the version used. 
In v1.18 and earlier it was ['Config']['Memory'], but in v1.19 it changed to ['HostConfig']['Memory']. --- cloud/docker/docker.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f8532659ecf..8055fbf23c9 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -942,6 +942,9 @@ class DockerManager(object): running = self.get_running_containers() current = self.get_inspect_containers(running) + #Get API version + api_version = self.client.version()['ApiVersion'] + image = self.get_inspect_image() if image is None: # The image isn't present. Assume that we're about to pull a new @@ -1010,6 +1013,10 @@ class DockerManager(object): actual_mem = container['HostConfig']['Memory'] + #Use v1.18 API and earlier Memory element location + if docker_api_version <= 1.18: + actual_mem = container['Config']['Memory'] + if expected_mem and actual_mem != expected_mem: self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) differing.append(container) From 613fe54908eb406ca942e15aa340259fdd6f0837 Mon Sep 17 00:00:00 2001 From: Micheal Waltz Date: Thu, 23 Jul 2015 12:17:26 -0700 Subject: [PATCH 243/386] Use a if/else instead to avoid loading possibly invalid values for Memory --- cloud/docker/docker.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 8055fbf23c9..df7eb141aec 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1011,10 +1011,10 @@ class DockerManager(object): except ValueError as e: self.module.fail_json(msg=str(e)) - actual_mem = container['HostConfig']['Memory'] - - #Use v1.18 API and earlier Memory element location - if docker_api_version <= 1.18: + #For v1.19 API and above use HostConfig, otherwise use Config + if docker_api_version >= 1.19: + actual_mem = container['HostConfig']['Memory'] + else: actual_mem = container['Config']['Memory'] if expected_mem and actual_mem != 
expected_mem: From 6a872469fc1bda54126dfa91338cf7a26bde701c Mon Sep 17 00:00:00 2001 From: fti7 Date: Fri, 17 Oct 2014 18:23:25 +0200 Subject: [PATCH 244/386] mount: Support for Check-Mode --- system/mount.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/system/mount.py b/system/mount.py index f052e36dd2d..ff7094dad3b 100644 --- a/system/mount.py +++ b/system/mount.py @@ -108,7 +108,7 @@ def _escape_fstab(v): """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') -def set_mount(**kwargs): +def set_mount(module, **kwargs): """ set/change a mount point location in fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -167,14 +167,14 @@ def set_mount(**kwargs): to_write.append(new_line % args) changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) # mount function needs origname return (origname, changed) -def unset_mount(**kwargs): +def unset_mount(module, **kwargs): """ remove a mount point from fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -217,7 +217,7 @@ def unset_mount(**kwargs): # if we got here we found a match - continue and mark changed changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) # umount needs origname @@ -281,7 +281,8 @@ def main(): src = dict(required=True), fstype = dict(required=True), fstab = dict(default='/etc/fstab') - ) + ), + supports_check_mode=True ) @@ -316,8 +317,8 @@ def main(): state = module.params['state'] name = module.params['name'] if state == 'absent': - name, changed = unset_mount(**args) - if changed: + name, changed = unset_mount(module, **args) + if changed and not module.check_mode: if os.path.ismount(name): res,msg = umount(module, **args) if res: @@ -333,26 +334,27 @@ def 
main(): if state == 'unmounted': if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) + if not module.check_mode: + res,msg = umount(module, **args) + if res: + module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) changed = True module.exit_json(changed=changed, **args) if state in ['mounted', 'present']: if state == 'mounted': - if not os.path.exists(name): + if not os.path.exists(name) and not module.check_mode: try: os.makedirs(name) except (OSError, IOError), e: module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - name, changed = set_mount(**args) + name, changed = set_mount(module, **args) if state == 'mounted': res = 0 if os.path.ismount(name): - if changed: + if changed and not module.check_mode: res,msg = mount(module, **args) elif 'bind' in args.get('opts', []): changed = True @@ -367,7 +369,9 @@ def main(): res,msg = mount(module, **args) else: changed = True - res,msg = mount(module, **args) + if not module.check_mode: + res,msg = mount(module, **args) + if res: module.fail_json(msg="Error mounting %s: %s" % (name, msg)) From 03a809a21c0fc683c8e78666e633dfbd85ee216f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 23 Jul 2015 15:52:11 -0400 Subject: [PATCH 245/386] added version_added to body_format in uri --- network/basics/uri.py | 1 + 1 file changed, 1 insertion(+) diff --git a/network/basics/uri.py b/network/basics/uri.py index d7e5eee427c..3babba6d609 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -71,6 +71,7 @@ options: required: false choices: [ "raw", "json" ] default: raw + version_added: "2.0" method: description: - The HTTP method of the request or response. From 5f2b365faa456990dfa65c7d4ba3393168069f67 Mon Sep 17 00:00:00 2001 From: khassen Date: Thu, 13 Nov 2014 20:58:00 -0500 Subject: [PATCH 246/386] Use the common/shared MD5 function. 
--- cloud/amazon/s3.py | 13 ++----------- cloud/google/gc_storage.py | 4 ++-- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 072c8bc40d4..e6b511b36b8 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -175,7 +175,6 @@ EXAMPLES = ''' import os import urlparse -import hashlib from ssl import SSLError try: @@ -356,13 +355,6 @@ def is_walrus(s3_url): else: return False -def get_md5_digest(local_file): - md5 = hashlib.md5() - with open(local_file, 'rb') as f: - for data in f.read(1024 ** 2): - md5.update(data) - return md5.hexdigest() - def main(): argument_spec = ec2_argument_spec() @@ -488,8 +480,7 @@ def main(): # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: md5_remote = keysum(module, s3, bucket, obj, version=version) - md5_local = get_md5_digest(dest) - + md5_local = module.md5(dest) if md5_local == md5_remote: sum_matches = True if overwrite == 'always': @@ -532,7 +523,7 @@ def main(): # Lets check key state. Does it exist and if it does, compute the etag md5sum. if bucketrtn is True and keyrtn is True: md5_remote = keysum(module, s3, bucket, obj) - md5_local = get_md5_digest(src) + md5_local = module.md5(src) if md5_local == md5_remote: sum_matches = True diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 280bc42a219..c1e6f5707a6 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -284,7 +284,7 @@ def get_download_url(module, gs, bucket, obj, expiry): def handle_get(module, gs, bucket, obj, overwrite, dest): md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() + md5_local = module.md5(dest) if md5_local == md5_remote: module.exit_json(changed=False) if md5_local != md5_remote and not overwrite: @@ -300,7 +300,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration): # Lets check key state. 
Does it exist and if it does, compute the etag md5sum. if bucket_rc and key_rc: md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() + md5_local = module.md5(src) if md5_local == md5_remote: module.exit_json(msg="Local and remote object are identical", changed=False) if md5_local != md5_remote and not overwrite: From 349195e6894ffa2cb06fb78d3fe49511ebcc5480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Thu, 26 Feb 2015 15:10:28 +0100 Subject: [PATCH 247/386] [user] Allow '*' as empty password. If `password` is defined as `*` `useradd` or `usermod` returns an error: msg: usermod: Invalid password: `*' This works very well on Linux host to not define any password for a user (mainly useful if your setup is only based on SSH keys for auth). On OpenBSD this does not work, so we have to ignore the encrypted password parameter if it defined as `*`. --- system/user.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/system/user.py b/system/user.py index 1045df70e67..61c0b0c2eda 100644 --- a/system/user.py +++ b/system/user.py @@ -916,7 +916,7 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.password is not None: + if self.password is not None and self.password != '*': cmd.append('-p') cmd.append(self.password) @@ -1010,7 +1010,8 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.update_password == 'always' and self.password is not None and info[1] != self.password: + if self.update_password == 'always' and self.password is not None \ + and self.password != '*' and info[1] != self.password: cmd.append('-p') cmd.append(self.password) From a3b9fb58bfc1935dd49bdb284cf62d6ed9662909 Mon Sep 17 00:00:00 2001 From: KIKUCHI Koichiro Date: Wed, 22 Jul 2015 19:10:05 +0900 Subject: [PATCH 248/386] Fix service enabled check failure on FreeBSD --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/system/service.py b/system/service.py index 4336b7a7775..8caece20143 100644 --- a/system/service.py +++ b/system/service.py @@ -988,7 +988,7 @@ class FreeBsdService(Service): try: return self.service_enable_rcconf() - except: + except Exception: self.module.fail_json(msg='unable to set rcvar') def service_control(self): From 777d736baa0e1a3f0943b03a5dc2d1d219f96fe7 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 5 May 2015 23:31:02 +0100 Subject: [PATCH 249/386] Added header support Added the ability to pass custom http headers to the fetch_url method. --- network/basics/get_url.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 66fc71b78da..d755808485e 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -98,6 +98,12 @@ options: required: false default: 10 version_added: '1.8' + headers: + description: + - Add custom HTTP headers to a request in the format 'key:value,key:value' + required: false + default: null + version_added: '1.9' url_username: description: - The username for use in HTTP basic authentication. This parameter can be used @@ -138,6 +144,9 @@ EXAMPLES=''' - name: download file and force basic auth get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes + +- name: download file with custom HTTP headers + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers: 'key:value,key:value' ''' import urlparse @@ -157,14 +166,14 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): +def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None): """ Download data from the url and store in a temporary file. 
Return (tempfile, info about the request) """ - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -214,6 +223,7 @@ def main(): dest = dict(required=True), sha256sum = dict(default=''), timeout = dict(required=False, type='int', default=10), + headers = dict(required=False, default=None), ) module = AnsibleModule( @@ -228,6 +238,15 @@ def main(): sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] + + # Parse headers to dict + if module.params['headers']: + try: + headers = dict(item.split(':') for item in module.params['headers'].split(',')) + except: + module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.") + else: + headers = None dest_is_dir = os.path.isdir(dest) last_mod_time = None @@ -263,7 +282,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers) # Now the request has completed, we can finally generate the final # destination file name from the info dict. From 22461e335a68af1bb6bf43f63231c5dd7c0aa0b1 Mon Sep 17 00:00:00 2001 From: Feanil Patel Date: Fri, 24 Jul 2015 09:38:11 -0400 Subject: [PATCH 250/386] If overwrite is set, download eagerly. If we are going to overwrite the file anyway, there is no need to do any checksums locally or grab the remote etag.
--- cloud/amazon/s3.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index e6b511b36b8..ecc2a9dc69e 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -472,10 +472,10 @@ def main(): else: module.fail_json(msg="Key %s does not exist."%obj, failed=True) - # If the destination path doesn't exist, no need to md5um etag check, so just download. + # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download. pathrtn = path_check(dest) - if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest, retries, version=version) + if pathrtn is False or overwrite == 'always': + download_s3file(module, s3, bucket, obj, dest) # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: @@ -499,10 +499,6 @@ def main(): if sum_matches is True and overwrite == 'never': module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - # At this point explicitly define the overwrite condition. - if sum_matches is True and pathrtn is True and overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest, retries, version=version) - # if our mode is a PUT operation (upload), go through the procedure as appropriate ... 
if mode == 'put': From 76344db61e6de2a0a5c18bc2734f26704caba31f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Jul 2015 12:12:40 -0400 Subject: [PATCH 251/386] fixed version added --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index d755808485e..01479260277 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -103,7 +103,7 @@ options: - Add custom HTTP headers to a request in the format 'key:value,key:value' required: false default: null - version_added: '1.9' + version_added: '2.0' url_username: description: - The username for use in HTTP basic authentication. This parameter can be used From c688408385e39eb468ec1c3fc1010f008c75af87 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Jul 2015 17:54:02 -0400 Subject: [PATCH 252/386] fixed fragment docs --- files/copy.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/files/copy.py b/files/copy.py index ad56800764b..711c4ec8e52 100644 --- a/files/copy.py +++ b/files/copy.py @@ -77,9 +77,10 @@ options: already existed. 
required: false version_added: "1.5" -extends_documentation_fragment: files -extends_documentation_fragment: validate -author: +extends_documentation_fragment: + - files + - validate +author: - "Ansible Core Team" - "Michael DeHaan" notes: From 9b8470ae95b92a80da5d233d6a1f9d1755021cce Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Jul 2015 17:57:13 -0400 Subject: [PATCH 253/386] minor doc fixes --- files/lineinfile.py | 7 ++++--- files/replace.py | 5 +++-- files/template.py | 11 ++++------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index 777f0a498a9..45dd12ec135 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -27,11 +27,12 @@ import tempfile DOCUMENTATION = """ --- module: lineinfile -author: +author: - "Daniel Hokka Zakrissoni (@dhozac)" - "Ahti Kitsik (@ahtik)" -extends_documentation_fragment: files -extends_documentation_fragment: validate +extends_documentation_fragment: + - files + - validate short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: diff --git a/files/replace.py b/files/replace.py index dea2c32a54f..765f60f5c8f 100644 --- a/files/replace.py +++ b/files/replace.py @@ -26,8 +26,9 @@ DOCUMENTATION = """ --- module: replace author: "Evan Kaufman (@EvanK)" -extends_documentation_fragment: files -extends_documentation_fragment: validate +extends_documentation_fragment: + - files + - validate short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. description: diff --git a/files/template.py b/files/template.py index 120917f49c2..637cab5ca7b 100644 --- a/files/template.py +++ b/files/template.py @@ -24,13 +24,10 @@ options: description: - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. 
required: true - default: null - aliases: [] dest: description: - Location to render the template to on the remote machine. required: true - default: null backup: description: - Create a backup file including the timestamp information so you can get @@ -48,12 +45,12 @@ options: default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." -requirements: [] author: - - Ansible Core Team + - Ansible Core Team - Michael DeHaan -extends_documentation_fragment: files -extends_documentation_fragment: validate +extends_documentation_fragment: + - files + - validate ''' EXAMPLES = ''' From fd0fc5131c45660f66a58aa1c04d83180f86d5e5 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Fri, 24 Jul 2015 22:06:56 +0000 Subject: [PATCH 254/386] Fix missing alias "groups". --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index a6b378c7e9c..8a50359b71b 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -1226,7 +1226,7 @@ def main(): argument_spec.update(dict( key_name = dict(aliases = ['keypair']), id = dict(), - group = dict(type='list'), + group = dict(type='list', aliases=['groups']), group_id = dict(type='list'), zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), From 481f3008364920345573c92a9cddbc8c5b090a3f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Jul 2015 23:32:41 -0400 Subject: [PATCH 255/386] clarified error messages to actually give back ansible module option instead of internal boto field name --- cloud/amazon/ec2.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 8a50359b71b..55c45a647f4 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -1283,21 +1283,21 @@ def main(): else: module.fail_json(msg="region must be specified") - tagged_instances = [] + tagged_instances = [] - state = 
module.params.get('state') + state = module.params['state'] if state == 'absent': - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='termination_list needs to be a list of instances to terminate') + instance_ids = module.params['instance_ids'] + if not instance_ids: + module.fail_json(msg='instance_ids list is required for absent state') (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) elif state in ('running', 'stopped'): - instance_ids = module.params.get('instance_ids') - if not isinstance(instance_ids, list): - module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids) + instance_ids = module.params['instance_ids'] + if not instance_ids: + module.fail_json(msg='instance_ids list is requried for %s state' % state) (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state) From b991a2708b75c781f6bb12db623840e245d0fe99 Mon Sep 17 00:00:00 2001 From: saito-hideki Date: Sat, 25 Jul 2015 21:48:13 +0900 Subject: [PATCH 256/386] Add Solaris support in hostname module --- system/hostname.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index d9193641eb2..2442f0ad3cc 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -363,6 +363,41 @@ class OpenBSDStrategy(GenericStrategy): # =========================================== +class SolarisStrategy(GenericStrategy): + """ + This is a Solaris11 or later Hostname manipulation strategy class - it + execute hostname command. 
+ """ + + HOSTNAME_CMD = '/usr/bin/hostname' + + def set_current_hostname(self, name): + cmd_option = '-t' + cmd = [self.HOSTNAME_CMD, cmd_option, name] + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % + (rc, out, err)) + + def get_permanent_hostname(self): + fmri = 'svc:/system/identity:node' + pattern = 'config/nodename' + cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern) + rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) + if rc != 0: + self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % + (rc, out, err)) + return out.strip() + + def set_permanent_hostname(self, name): + cmd = [self.HOSTNAME_CMD, name] + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % + (rc, out, err)) + +# =========================================== + class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' @@ -486,6 +521,11 @@ class OpenBSDHostname(Hostname): distribution = None strategy_class = OpenBSDStrategy +class SolarisHostname(Hostname): + platform = 'SunOS' + distribution = None + strategy_class = SolarisStrategy + # =========================================== def main(): From e91ad2ab6c702b6878f5506fb682c2aac28a394f Mon Sep 17 00:00:00 2001 From: saito-hideki Date: Sat, 25 Jul 2015 23:32:55 +0900 Subject: [PATCH 257/386] Add Solaris support in hostname module feature_pull_request: #1804 --- system/hostname.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 2442f0ad3cc..f986a91f8f3 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -21,7 +21,9 @@ DOCUMENTATION = ''' --- module: hostname -author: "Hiroaki Nakamura (@hnakamur)" +author: + - "Hiroaki Nakamura (@hnakamur)" + - "Hideki Saito (@saito-hideki)" version_added: "1.4" short_description: Manage hostname 
requirements: [ hostname ] @@ -116,13 +118,13 @@ class GenericStrategy(object): - set_current_hostname(name) - set_permanent_hostname(name) """ + def __init__(self, module): self.module = module - - HOSTNAME_CMD = '/bin/hostname' + self.hostname_cmd = self.module.get_bin_path('hostname', True) def get_current_hostname(self): - cmd = [self.HOSTNAME_CMD] + cmd = [self.hostname_cmd] rc, out, err = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % @@ -130,7 +132,7 @@ class GenericStrategy(object): return out.strip() def set_current_hostname(self, name): - cmd = [self.HOSTNAME_CMD, name] + cmd = [self.hostname_cmd, name] rc, out, err = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % @@ -369,11 +371,9 @@ class SolarisStrategy(GenericStrategy): execute hostname command. """ - HOSTNAME_CMD = '/usr/bin/hostname' - def set_current_hostname(self, name): cmd_option = '-t' - cmd = [self.HOSTNAME_CMD, cmd_option, name] + cmd = [self.hostname_cmd, cmd_option, name] rc, out, err = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % @@ -390,7 +390,7 @@ class SolarisStrategy(GenericStrategy): return out.strip() def set_permanent_hostname(self, name): - cmd = [self.HOSTNAME_CMD, name] + cmd = [self.hostname_cmd, name] rc, out, err = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % From f3a0f0f580edb0b4bb9d7a7888122e3315b15b07 Mon Sep 17 00:00:00 2001 From: Sudheer Satyanarayana Date: Sun, 26 Jul 2015 18:08:33 +0530 Subject: [PATCH 258/386] Update mysql_db.py Mention package installation instructions for CentOS in notes. 
--- database/mysql/mysql_db.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index c018ad143db..33720f5d4f6 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -83,7 +83,8 @@ options: required: false notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. (See M(apt).) + is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this + is as easy as yum install MySQL-python. (See M(yum).) - Both I(login_password) and I(login_user) are required when you are passing credentials. If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL From 2bfddb015d2d5d162647957c31431baf30a30938 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 26 Jul 2015 14:40:22 -0400 Subject: [PATCH 259/386] In copy, set the mode before running th validation Because some programs that do validation (like visudo) may require the permissions to be more restricted. Fixes ansible/ansible#11385 --- files/copy.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/copy.py b/files/copy.py index 711c4ec8e52..f9f1b365c74 100644 --- a/files/copy.py +++ b/files/copy.py @@ -220,6 +220,7 @@ def main(): original_basename = module.params.get('original_basename',None) validate = module.params.get('validate',None) follow = module.params['follow'] + mode = module.params['mode'] if not os.path.exists(src): module.fail_json(msg="Source %s failed to transfer" % (src)) @@ -289,6 +290,11 @@ def main(): os.unlink(dest) open(dest, 'w').close() if validate: + # if we have a mode, make sure we set it on the temporary + # file source as some validations may require it + # FIXME: should we do the same for owner/group here too? 
+ if mode is not None: + module.set_mode_if_different(src, mode, False) if "%s" not in validate: module.fail_json(msg="validate must contain %%s: %s" % (validate)) (rc,out,err) = module.run_command(validate % src) From 69deab1786aa0bcfd0347bb4c33a92eabd47170b Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Mon, 27 Jul 2015 11:02:24 -0400 Subject: [PATCH 260/386] Warn on use of sudo, etc. in shell/command. --- commands/command.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/commands/command.py b/commands/command.py index dbb23949273..b703c669b68 100644 --- a/commands/command.py +++ b/commands/command.py @@ -144,12 +144,15 @@ def check_command(commandline): 'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get', 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile', 'rsync': 'synchronize' } + become = [ 'sudo', 'su', 'pbrun', 'pfexec', 'runas' ] warnings = list() command = os.path.basename(commandline.split()[0]) if command in arguments: warnings.append("Consider using file module with %s rather than running %s" % (arguments[command], command)) if command in commands: warnings.append("Consider using %s module rather than running %s" % (commands[command], command)) + if command in become: + warnings.append("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,)) return warnings From bc60c893f5cc2127c921c60b3ba3962dc8d46385 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Jul 2015 12:50:48 -0400 Subject: [PATCH 261/386] Prefer non-zero rc's over text analyis in service status checks Fixes ansible/ansible#10441 --- system/service.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/system/service.py b/system/service.py index 8caece20143..2ac7f2a4ad7 100644 --- a/system/service.py +++ b/system/service.py @@ -579,6 +579,11 @@ class LinuxService(Service): self.running = "started" in openrc_status_stdout self.crashed = "crashed" in 
openrc_status_stderr + # Prefer a non-zero return code. For reference, see: + # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html + if self.running is None and rc in [1, 2, 3, 4, 69]: + self.running = False + # if the job status is still not known check it by status output keywords # Only check keywords if there's only one line of output (some init # scripts will output verbosely in case of error and those can emit @@ -603,14 +608,10 @@ class LinuxService(Service): elif 'dead but pid file exists' in cleanout: self.running = False - # if the job status is still not known check it by response code - # For reference, see: - # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html - if self.running is None: - if rc in [1, 2, 3, 4, 69]: - self.running = False - elif rc == 0: - self.running = True + # if the job status is still not known and we got a zero for the + # return code, assume here that the service is running + if self.running is None and rc == 0: + self.running = True # if the job status is still not known check it by special conditions if self.running is None: From 8a9405a55bc5ac814b6e952acd2df0a6467e8a27 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Jul 2015 10:40:27 -0700 Subject: [PATCH 262/386] Convert object into dict so it will turn into json properly --- cloud/amazon/ec2_lc.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 0721b4e203d..179ef14d70f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -237,11 +237,14 @@ def create_launch_config(connection, module): changed = True except BotoServerError, e: module.fail_json(msg=str(e)) - result = launch_configs[0] + + result = dict( + ((a[0], a[1]) for a in vars(launch_configs[0]) if a[0] not in ('connection',))) module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), 
image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=result.instance_type, + security_groups=result.security_groups, + instance_type=result.instance_type, result=result) From 587370e2e70103fc8e6270ef73fcc2a7125dca2a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Jul 2015 14:20:38 -0400 Subject: [PATCH 263/386] minor doc fixes to get_url --- network/basics/get_url.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 01479260277..5e39887df7f 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -46,8 +46,6 @@ options: description: - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path required: true - default: null - aliases: [] dest: description: - absolute path of where to download the file to. @@ -57,7 +55,6 @@ options: If C(dest) is a directory, the file will always be downloaded (regardless of the force option), but replaced only if the contents changed. required: true - default: null force: description: - If C(yes) and C(dest) is not a directory, will download the file every @@ -100,10 +97,10 @@ options: version_added: '1.8' headers: description: - - Add custom HTTP headers to a request in the format 'key:value,key:value' - required: false - default: null - version_added: '2.0' + - 'Add custom HTTP headers to a request in the format "key:value,key:value"' + required: false + default: null + version_added: '2.0' url_username: description: - The username for use in HTTP basic authentication. This parameter can be used @@ -112,8 +109,8 @@ options: version_added: '1.6' url_password: description: - - The password for use in HTTP basic authentication. If the C(url_username) - parameter is not specified, the C(url_password) parameter will not be used. + - The password for use in HTTP basic authentication. 
If the C(url_username) + parameter is not specified, the C(url_password) parameter will not be used. required: false version_added: '1.6' force_basic_auth: From 19741d75eb01105fabdb26ef628f8fa5abff36b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Jul 2015 11:35:21 -0700 Subject: [PATCH 264/386] Fix the local_rsync_path parameter used by the action plugin --- files/synchronize.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index ff58f9c1032..8266ece7b36 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -233,6 +233,7 @@ def main(): delete = dict(default='no', type='bool'), private_key = dict(default=None), rsync_path = dict(default=None), + _local_rsync_path = dict(default='rsync', type='path'), archive = dict(default='yes', type='bool'), checksum = dict(default='no', type='bool'), compress = dict(default='yes', type='bool'), @@ -261,7 +262,7 @@ def main(): delete = module.params['delete'] private_key = module.params['private_key'] rsync_path = module.params['rsync_path'] - rsync = module.params.get('local_rsync_path', 'rsync') + rsync = module.params.get('_local_rsync_path', 'rsync') rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout') archive = module.params['archive'] checksum = module.params['checksum'] From 84dfcf36907a6487e588d214ebf4f117aecbd327 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Fri, 24 Jul 2015 15:41:19 -0700 Subject: [PATCH 265/386] Handle non-updatable openstack subnet details Some things cannot be updated via the API, so check for those and fail if the user is wanting to update them. Also don't try to update ipv6 stuff, as that doesn't work and will cause a traceback.
--- cloud/openstack/os_subnet.py | 44 +++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index b62eb10b0cc..22876c80869 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -144,9 +144,38 @@ EXAMPLES = ''' ipv6_address_mode: dhcpv6-stateless ''' +def _can_update(subnet, module, cloud): + """Check for differences in non-updatable values""" + network_name = module.params['network_name'] + cidr = module.params['cidr'] + ip_version = int(module.params['ip_version']) + ipv6_ra_mode = module.params['ipv6_ra_mode'] + ipv6_a_mode = module.params['ipv6_address_mode'] -def _needs_update(subnet, module): + if network_name: + network = cloud.get_network(network_name) + if network: + netid = network['id'] + else: + module.fail_json(msg='No network found for %s' % network_name) + if netid != subnet['network_id']: + module.fail_json(msg='Cannot update network_name in existing \ + subnet') + if ip_version and subnet['ip_version'] != ip_version: + module.fail_json(msg='Cannot update ip_version in existing subnet') + if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ip_version: + module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet') + if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode: + module.fail_json(msg='Cannot update ipv6_address_mode in existing \ + subnet') + +def _needs_update(subnet, module, cloud): """Check for differences in the updatable values.""" + + # First check if we are trying to update something we're not allowed to + _can_update(subnet, module, cloud) + + # now check for the things we are allowed to update enable_dhcp = module.params['enable_dhcp'] subnet_name = module.params['name'] pool_start = module.params['allocation_pool_start'] @@ -176,12 +205,12 @@ def _needs_update(subnet, module): return False -def _system_state_change(module, subnet): +def _system_state_change(module, subnet, 
cloud): state = module.params['state'] if state == 'present': if not subnet: return True - return _needs_update(subnet, module) + return _needs_update(subnet, module, cloud) if state == 'absent' and subnet: return True return False @@ -245,7 +274,8 @@ def main(): subnet = cloud.get_subnet(subnet_name) if module.check_mode: - module.exit_json(changed=_system_state_change(module, subnet)) + module.exit_json(changed=_system_state_change(module, subnet, + cloud)) if state == 'present': if not subnet: @@ -261,16 +291,14 @@ def main(): ipv6_address_mode=ipv6_a_mode) changed = True else: - if _needs_update(subnet, module): + if _needs_update(subnet, module, cloud): cloud.update_subnet(subnet['id'], subnet_name=subnet_name, enable_dhcp=enable_dhcp, gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes, - ipv6_ra_mode=ipv6_ra_mode, - ipv6_address_mode=ipv6_a_mode) + host_routes=host_routes) changed = True else: changed = False From dbdbcb33ccdbc14a6eb805181e7a1403399badc0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Jul 2015 22:32:32 -0400 Subject: [PATCH 266/386] defined DEFAULT_DOCKER_API_VERSION to avoid it erroring out before we send the message that docker.py is missing --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1e55b31e8f0..28478667e2f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -417,6 +417,7 @@ EXAMPLES = ''' ''' HAS_DOCKER_PY = True +DEFAULT_DOCKER_API_VERSION = None import sys import json From 1f62f5f580f21cc5abc4072829f0abee544368ea Mon Sep 17 00:00:00 2001 From: Jaime Gago Date: Mon, 27 Jul 2015 23:40:49 -0700 Subject: [PATCH 267/386] Fix example indentation bug --- cloud/amazon/ec2_vol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 0d275cc91d7..4b829f7c26e 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ 
-140,7 +140,7 @@ EXAMPLES = ''' - ec2_vol: instance: "{{ item.id }} " volume_size: 5 - with_items: ec2.instances + with_items: ec2.instances register: ec2_vol # Example: Launch an instance and then add a volume if not already attached From 520a125693cc51e174b16517510c5bb4faa7b51c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sat, 25 Apr 2015 00:12:25 -0400 Subject: [PATCH 268/386] bugfixes for redhat_subscription - correctly return pool ids for newer versions of subscription-manager - allow for managing subscriptions after initial registration. --- packaging/os/redhat_subscription.py | 77 +++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 1cfd8fc25a6..0c4647ccf2a 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -180,7 +180,7 @@ class Rhsm(RegistrationBase): for k,v in kwargs.items(): if re.search(r'^(system|rhsm)_', k): args.append('--%s=%s' % (k.replace('_','.'), v)) - + self.module.run_command(args, check_rc=True) @property @@ -226,14 +226,26 @@ class Rhsm(RegistrationBase): rc, stderr, stdout = self.module.run_command(args, check_rc=True) - def unsubscribe(self): + def unsubscribe(self, serials=None): ''' - Unsubscribe a system from all subscribed channels + Unsubscribe a system from subscribed channels + Args: + serials(list or None): list of serials to unsubscribe. If + serials is none or an empty list, then + all subscribed channels will be removed. 
Raises: * Exception - if error occurs while running command ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) + items = [] + if serials is not None and serials: + items = ["--serial=%s" % s for s in serials] + if serials is None: + items = ["--all"] + + if items: + args = ['subscription-manager', 'unsubscribe'] + items + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + return serials def unregister(self): ''' @@ -255,8 +267,27 @@ class Rhsm(RegistrationBase): # Available pools ready for subscription available_pools = RhsmPools(self.module) + subscribed_pool_ids = [] for pool in available_pools.filter(regexp): pool.subscribe() + subscribed_pool_ids.append(pool.get_pool_id()) + return subscribed_pool_ids + + def update_subscriptions(self, regexp): + changed=False + consumed_pools = RhsmPools(self.module, consumed=True) + pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter(regexp)] + + serials_to_remove=[p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] + serials = self.unsubscribe(serials=serials_to_remove) + + subscribed_pool_ids = self.subscribe(regexp) + + if subscribed_pool_ids or serials: + changed=True + return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, + 'unsubscribed_serials': serials} + class RhsmPool(object): @@ -272,8 +303,11 @@ class RhsmPool(object): def __str__(self): return str(self.__getattribute__('_name')) + def get_pool_id(self): + return getattr(self, 'PoolId', getattr(self, 'PoolID')) + def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId + args = "subscription-manager subscribe --pool %s" % self.get_pool_id() rc, stdout, stderr = self.module.run_command(args, check_rc=True) if rc == 0: return True @@ -285,18 +319,22 @@ class RhsmPools(object): """ This class is used for manipulating pools subscriptions with RHSM """ - def __init__(self, module): + def 
__init__(self, module, consumed=False): self.module = module - self.products = self._load_product_list() + self.products = self._load_product_list(consumed) def __iter__(self): return self.products.__iter__() - def _load_product_list(self): + def _load_product_list(self, consumed=False): """ - Loads list of all available pools for system in data structure + Loads list of all available or consumed pools for system in data structure + + Args: + consumed(bool): if True list consumed pools, else list available pools (default False) """ - args = "subscription-manager list --available" + args = "subscription-manager list" + args += " --consumed" if consumed else " --available" rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] @@ -375,18 +413,27 @@ def main(): # Register system if rhn.is_registered: - module.exit_json(changed=False, msg="System already registered.") + if pool != '^$': + try: + result = rhn.update_subscriptions(pool) + except Exception, e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e)) + else: + module.exit_json(**result) + else: + module.exit_json(changed=False, msg="System already registered.") else: try: rhn.enable() rhn.configure(**module.params) rhn.register(username, password, autosubscribe, activationkey, org_id) - rhn.subscribe(pool) + subscribed_pool_ids = rhn.subscribe(pool) except Exception, e: module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e)) else: - module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname) - + module.exit_json(changed=True, + msg="System successfully registered to '%s'." 
% server_hostname, + subscribed_pool_ids=subscribed_pool_ids) # Ensure system is *not* registered if state == 'absent': if not rhn.is_registered: From 08021026348cdab4c4651154755567f546beecc3 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 28 Jul 2015 06:23:20 -0400 Subject: [PATCH 269/386] Add example for changing consumed subscriptions --- packaging/os/redhat_subscription.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 0c4647ccf2a..233d1a04e2b 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -76,6 +76,12 @@ EXAMPLES = ''' - redhat_subscription: state=present activationkey=1-222333444 pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' + +# Update the consumed subscriptions from the previous example (remove the Red +# Hat Virtualization subscription) +- redhat_subscription: state=present + activationkey=1-222333444 + pool='^Red Hat Enterprise Server$' ''' import os From c458b5e96cbee9359ead3540365120f2215e0517 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 28 Jul 2015 06:30:37 -0400 Subject: [PATCH 270/386] python 2.4 syntax fix --- packaging/os/redhat_subscription.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 233d1a04e2b..8e1482a8c4f 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -340,7 +340,10 @@ class RhsmPools(object): consumed(bool): if True list consumed pools, else list available pools (default False) """ args = "subscription-manager list" - args += " --consumed" if consumed else " --available" + if consumed: + args += " --consumed" + else: + args += " --available" rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] From 8227b48a6a6a8905cd005767efa5bba951d7e0a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 
Jul 2015 14:00:37 -0400 Subject: [PATCH 271/386] minor doc fixes --- cloud/amazon/cloudformation.py | 10 ---------- cloud/amazon/ec2_ami_find.py | 2 +- cloud/amazon/ec2_eip.py | 2 +- cloud/amazon/ec2_facts.py | 2 +- cloud/amazon/route53.py | 22 ++++------------------ cloud/google/gc_storage.py | 2 +- cloud/google/gce.py | 26 ++++---------------------- commands/command.py | 2 -- commands/raw.py | 5 ++--- 9 files changed, 14 insertions(+), 59 deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index f95fbba00e2..e4ac80fcf5a 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -26,35 +26,28 @@ options: description: - name of the cloudformation stack required: true - default: null - aliases: [] disable_rollback: description: - If a stacks fails to form, rollback will remove the stack required: false default: "false" choices: [ "true", "false" ] - aliases: [] template_parameters: description: - a list of hashes of all the template variables for the stack required: false default: {} - aliases: [] state: description: - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. If state is "absent", stack will be removed. required: true - default: null - aliases: [] template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null - aliases: [] notification_arns: description: - The Simple Notification Service (SNS) topic ARNs to publish stack related events. 
@@ -66,7 +59,6 @@ options: - the path of the cloudformation stack policy required: false default: null - aliases: [] version_added: "1.9" tags: description: @@ -74,13 +66,11 @@ options: Requires at least Boto version 2.6.0. required: false default: null - aliases: [] version_added: "1.4" region: description: - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. required: true - default: null aliases: ['aws_region', 'ec2_region'] version_added: "1.5" template_url: diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py index c8aa5d792df..f5ed91baab5 100644 --- a/cloud/amazon/ec2_ami_find.py +++ b/cloud/amazon/ec2_ami_find.py @@ -18,7 +18,7 @@ DOCUMENTATION = ''' --- module: ec2_ami_find -version_added: 2.0 +version_added: '2.0' short_description: Searches for AMIs to obtain the AMI ID and other information description: - Returns list of matching AMIs with AMI ID, along with other useful information diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index c3b764b2e63..c6a99a16782 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -5,7 +5,7 @@ module: ec2_eip short_description: associate an EC2 elastic IP with an instance. description: - This module associates AWS EC2 elastic IP addresses with instances -version_added: 1.4 +version_added: "1.4" options: instance_id: description: diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 6bd587bf018..5147428f646 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -29,7 +29,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] - version_added: 1.5.1 + version_added: '1.5.1' description: - This module fetches data from the metadata servers in ec2 (aws) as per http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. 
diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index aca01193555..9b867fb1e72 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -24,75 +24,61 @@ description: options: command: description: - - Specifies the action to take. + - Specifies the action to take. required: true - default: null - aliases: [] choices: [ 'get', 'create', 'delete' ] zone: description: - The DNS zone to modify required: true - default: null - aliases: [] hosted_zone_id: description: - The Hosted Zone ID of the DNS zone to modify required: false - version_added: 2.0 + version_added: "2.0" default: null record: description: - The full DNS record to create or delete required: true - default: null - aliases: [] ttl: description: - The TTL to give the new record required: false default: 3600 (one hour) - aliases: [] type: description: - The type of DNS record to create required: true - default: null - aliases: [] choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] alias: description: - Indicates if this is an alias record. required: false - version_added: 1.9 + version_added: "1.9" default: False - aliases: [] choices: [ 'True', 'False' ] alias_hosted_zone_id: description: - The hosted zone identifier. required: false - version_added: 1.9 + version_added: "1.9" default: null - aliases: [] value: description: - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it. required: false default: null - aliases: [] overwrite: description: - Whether an existing record should be overwritten on create if values do not match required: false default: null - aliases: [] retry_interval: description: - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long. 
required: false default: 500 - aliases: [] private_zone: description: - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone. diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index c1e6f5707a6..291d4ca0f4d 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -53,7 +53,7 @@ options: required: false default: private headers: - version_added: 2.0 + version_added: "2.0" description: - Headers to attach to object. required: false diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 60287ad8b6e..1de351a12fb 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -32,77 +32,65 @@ options: - image string to use for the instance required: false default: "debian-7" - aliases: [] instance_names: description: - a comma-separated list of instance names to create or destroy required: false default: null - aliases: [] machine_type: description: - machine type to use for the instance, use 'n1-standard-1' by default required: false default: "n1-standard-1" - aliases: [] metadata: description: - a hash/dictionary of custom data for the instance; '{"key":"value",...}' required: false default: null - aliases: [] service_account_email: - version_added: 1.5.1 + version_added: "1.5.1" description: - service account email required: false default: null - aliases: [] service_account_permissions: - version_added: 2.0 + version_added: "2.0" description: - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) required: false default: null - aliases: [] choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"] pem_file: - 
version_added: 1.5.1 + version_added: "1.5.1" description: - path to the pem file associated with the service account email required: false default: null - aliases: [] project_id: - version_added: 1.5.1 + version_added: "1.5.1" description: - your GCE project ID required: false default: null - aliases: [] name: description: - identifier when working with a single instance required: false - aliases: [] network: description: - name of the network, 'default' will be used if not specified required: false default: "default" - aliases: [] persistent_boot_disk: description: - if set, create the instance with a persistent boot disk required: false default: "false" - aliases: [] disks: description: - a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE). required: false default: null - aliases: [] version_added: "1.7" state: description: @@ -110,40 +98,34 @@ options: required: false default: "present" choices: ["active", "present", "absent", "deleted"] - aliases: [] tags: description: - a comma-separated list of tags to associate with the instance required: false default: null - aliases: [] zone: description: - the GCE zone to use required: true default: "us-central1-a" - aliases: [] ip_forward: version_added: "1.9" description: - set to true if the instance can forward ip packets (useful for gateways) required: false default: "false" - aliases: [] external_ip: version_added: "1.9" description: - type of external ip, ephemeral by default required: false default: "ephemeral" - aliases: [] disk_auto_delete: version_added: "1.9" description: - if set boot disk will be removed after instance destruction required: false default: "true" - aliases: [] requirements: - "python >= 2.6" diff --git a/commands/command.py b/commands/command.py index b703c669b68..8107dd521e9 100644 --- 
a/commands/command.py +++ b/commands/command.py @@ -30,7 +30,6 @@ import os DOCUMENTATION = ''' --- module: command -version_added: historical short_description: Executes a command on a remote node description: - The M(command) module takes the command name followed by a list of space-delimited arguments. @@ -45,7 +44,6 @@ options: See the examples! required: true default: null - aliases: [] creates: description: - a filename or glob pattern, when it already exists, this step will B(not) be run. diff --git a/commands/raw.py b/commands/raw.py index 5305c978630..b6cf6c7b925 100644 --- a/commands/raw.py +++ b/commands/raw.py @@ -3,7 +3,6 @@ DOCUMENTATION = ''' --- module: raw -version_added: historical short_description: Executes a low-down and dirty SSH command options: free_form: @@ -15,7 +14,7 @@ options: - change the shell used to execute the command. Should be an absolute path to the executable. required: false version_added: "1.0" -description: +description: - Executes a low-down and dirty SSH command, not going through the module subsystem. This is useful and should only be done in two cases. The first case is installing C(python-simplejson) on older (Python 2.4 and @@ -34,7 +33,7 @@ notes: playbooks will follow the trend of using M(command) unless M(shell) is explicitly required. When running ad-hoc commands, use your best judgement. -author: +author: - Ansible Core Team - Michael DeHaan ''' From 92f32a0253246936746e9caa4cf86f5f30000b47 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Jul 2015 14:50:39 -0400 Subject: [PATCH 272/386] added version added to headers in s3 --- cloud/amazon/s3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 1305f2529b6..da19cf0316f 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -63,6 +63,7 @@ options: - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. 
required: false default: null + version_added: "2.0" marker: description: - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. From 88a0d79873742fe2df8fdf9898df6f4fc56c2475 Mon Sep 17 00:00:00 2001 From: Luke Rohde Date: Tue, 28 Jul 2015 16:02:49 -0400 Subject: [PATCH 273/386] Use msg kwarg to pass error message in ec2_snapshot --- cloud/amazon/ec2_snapshot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index ee9d5ab1110..41dc4bdb21a 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -146,9 +146,9 @@ def main(): state = module.params.get('state') if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id: - module.fail_json('One and only one of volume_id or instance_id or snapshot_id must be specified') + module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified') if instance_id and not device_name or device_name and not instance_id: - module.fail_json('Instance ID and device name must both be specified') + module.fail_json(msg='Instance ID and device name must both be specified') ec2 = ec2_connect(module) From b43f236b0533e54ed6893669417dd199a0f37811 Mon Sep 17 00:00:00 2001 From: Micheal Waltz Date: Tue, 28 Jul 2015 15:11:33 -0700 Subject: [PATCH 274/386] Fix incorrect var name for api_version --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index df7eb141aec..9b237fb1895 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1012,7 +1012,7 @@ class DockerManager(object): self.module.fail_json(msg=str(e)) #For v1.19 API and above use HostConfig, otherwise use Config - if docker_api_version >= 1.19: + if api_version >= 1.19: actual_mem = 
container['HostConfig']['Memory'] else: actual_mem = container['Config']['Memory'] From 4241d940b7ae98e35c30dbda17e240134ef7f3dc Mon Sep 17 00:00:00 2001 From: "Cameron Wood (@cewood)" Date: Tue, 30 Sep 2014 10:26:06 +1000 Subject: [PATCH 275/386] Support specifying cidr_ip as a list Update/fix to Support specifying cidr_ip as a list Unicode isn't compatible with python2, so we needed some other solution to this problem. The simplest approach is if the ip item isn't already a list, simply convert it to one, and we're done. Thanks to @mspiegle for this suggestion. --- cloud/amazon/ec2_group.py | 70 ++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 30 deletions(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index b502bd1db53..4a7b9b5a828 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -288,19 +288,24 @@ def main(): rule['from_port'] = None rule['to_port'] = None - # If rule already exists, don't later delete it - ruleId = make_rule_key('in', rule, group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id] + # Convert ip to list we can iterate over + if not isinstance(ip, list): + ip = [ip] - if not module.check_mode: - group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup) - changed = True + # If rule already exists, don't later delete it + for thisip in ip: + ruleId = make_rule_key('in', rule, group_id, thisip) + if ruleId in groupRules: + del groupRules[ruleId] + # Otherwise, add new rule + else: + grantGroup = None + if group_id: + grantGroup = groups[group_id] + + if not module.check_mode: + group.authorize(rule['proto'], rule['from_port'], rule['to_port'], thisip, grantGroup) + changed = True # Finally, remove anything left in the groupRules -- these will be defunct rules if purge_rules: @@ -328,25 +333,30 @@ def main(): rule['from_port'] = None 
rule['to_port'] = None - # If rule already exists, don't later delete it - ruleId = make_rule_key('out', rule, group_id, ip) - if ruleId in groupRules: - del groupRules[ruleId] - # Otherwise, add new rule - else: - grantGroup = None - if group_id: - grantGroup = groups[group_id].id + # Convert ip to list we can iterate over + if not isinstance(ip, list): + ip = [ip] - if not module.check_mode: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=rule['proto'], - from_port=rule['from_port'], - to_port=rule['to_port'], - src_group_id=grantGroup, - cidr_ip=ip) - changed = True + # If rule already exists, don't later delete it + for thisip in ip: + ruleId = make_rule_key('out', rule, group_id, thisip) + if ruleId in groupRules: + del groupRules[ruleId] + # Otherwise, add new rule + else: + grantGroup = None + if group_id: + grantGroup = groups[group_id].id + + if not module.check_mode: + ec2.authorize_security_group_egress( + group_id=group.id, + ip_protocol=rule['proto'], + from_port=rule['from_port'], + to_port=rule['to_port'], + src_group_id=grantGroup, + cidr_ip=thisip) + changed = True elif vpc_id and not module.check_mode: # when using a vpc, but no egress rules are specified, # we add in a default allow all out rule, which was the From 0e9fccd098c15d93dfb6114079f19786140a1114 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 29 Jul 2015 14:53:55 -0400 Subject: [PATCH 276/386] changed verbose override variable --- system/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/setup.py b/system/setup.py index 2fbe71e260a..a28c037c745 100644 --- a/system/setup.py +++ b/system/setup.py @@ -123,7 +123,7 @@ def run_setup(module): setup_result['ansible_facts'][k] = v # hack to keep --verbose from showing all the setup module results - setup_result['verbose_override'] = True + setup_result['_ansible_verbose_override'] = True return setup_result From 4a30e35e532cfd58074653ae050a3c51c7f492fe Mon Sep 17 00:00:00 2001 From: 
brannon Date: Wed, 29 Jul 2015 12:23:37 -0700 Subject: [PATCH 277/386] Fix windows feature example to actually install IIS --- windows/win_feature.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_feature.py b/windows/win_feature.py index 2d7a747cea0..6ef4774788c 100644 --- a/windows/win_feature.py +++ b/windows/win_feature.py @@ -90,7 +90,7 @@ $ ansible -i hosts -m win_feature -a "name=Web-Server,Web-Common-Http" all - name: Install IIS win_feature: name: "Web-Server" - state: absent + state: present restart: yes include_sub_features: yes include_management_tools: yes From fc2a33b96613dacd2d00860126f6fa4ffb3c4e50 Mon Sep 17 00:00:00 2001 From: Brian Lloyd Date: Wed, 29 Jul 2015 19:38:15 -0400 Subject: [PATCH 278/386] Windows implementation of lineinfile and related documentation --- windows/win_lineinfile.ps1 | 452 +++++++++++++++++++++++++++++++++++++ windows/win_lineinfile.py | 155 +++++++++++++ 2 files changed, 607 insertions(+) create mode 100644 windows/win_lineinfile.ps1 create mode 100644 windows/win_lineinfile.py diff --git a/windows/win_lineinfile.ps1 b/windows/win_lineinfile.ps1 new file mode 100644 index 00000000000..ddf1d4e3000 --- /dev/null +++ b/windows/win_lineinfile.ps1 @@ -0,0 +1,452 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + + +# Parse the parameters file dropped by the Ansible machinery + +$params = Parse-Args $args; + + +# Initialize defaults for input parameters. + +$dest= Get-Attr $params "dest" $FALSE; +$regexp = Get-Attr $params "regexp" $FALSE; +$state = Get-Attr $params "state" "present"; +$line = Get-Attr $params "line" $FALSE; +$backrefs = Get-Attr $params "backrefs" "no"; +$insertafter = Get-Attr $params "insertafter" $FALSE; +$insertbefore = Get-Attr $params "insertbefore" $FALSE; +$create = Get-Attr $params "create" "no"; +$backup = Get-Attr $params "backup" "no"; +$validate = Get-Attr $params "validate" $FALSE; +$encoding = Get-Attr $params "encoding" "auto"; +$newline = Get-Attr $params "newline" "windows"; + + +# Parse dest / name /destfile param aliases for compatibility with lineinfile +# and fail if at least one spelling of the parameter is not provided. + +$dest = Get-Attr $params "dest" $FALSE; +If ($dest -eq $FALSE) { + $dest = Get-Attr $params "name" $FALSE; + If ($dest -eq $FALSE) { + $dest = Get-Attr $params "destfile" $FALSE; + If ($dest -eq $FALSE) { + Fail-Json (New-Object psobject) "missing required argument: dest"; + } + } +} + + +# Fail if the destination is not a file + +If (Test-Path $dest -pathType container) { + Fail-Json (New-Object psobject) "destination is a directory"; +} + + +# Write lines to a file using the specified line separator and encoding, +# performing validation if a validation command was specified. 
+ +function WriteLines($outlines, $dest, $linesep, $encodingobj, $validate) { + $temppath = [System.IO.Path]::GetTempFileName(); + $joined = $outlines -join $linesep; + [System.IO.File]::WriteAllText($temppath, $joined, $encodingobj); + + If ($validate -ne $FALSE) { + + If (!($validate -like "*%s*")) { + Fail-Json (New-Object psobject) "validate must contain %s: $validate"; + } + + $validate = $validate.Replace("%s", $temppath); + + $parts = [System.Collections.ArrayList] $validate.Split(" "); + $cmdname = $parts[0]; + + $cmdargs = $validate.Substring($cmdname.Length + 1); + + $process = [Diagnostics.Process]::Start($cmdname, $cmdargs); + $process.WaitForExit(); + + If ($process.ExitCode -ne 0) { + [string] $output = $process.StandardOutput.ReadToEnd(); + [string] $error = $process.StandardError.ReadToEnd(); + Remove-Item $temppath -force; + Fail-Json (New-Object psobject) "failed to validate $cmdname $cmdargs with error: $output $error"; + } + + } + + # Commit changes to the destination file + $cleandest = $dest.Replace("/", "\"); + Move-Item $temppath $cleandest -force; +} + + +# Backup the file specified with a date/time filename + +function BackupFile($path) { + $backuppath = $path + "." + [DateTime]::Now.ToString("yyyyMMdd-HHmmss"); + Copy-Item $path $backuppath; + return $backuppath; +} + + + +# Implement the functionality for state == 'present' + +function Present($dest, $regexp, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep) { + + # Note that we have to clean up the dest path because ansible wants to treat / and \ as + # interchangable in windows pathnames, but .NET framework internals do not support that. + $cleandest = $dest.Replace("/", "\"); + + # Check if destination exists. If it does not exist, either create it if create == "yes" + # was specified or fail with a reasonable error message. 
+ If (!(Test-Path $dest)) { + If ($create -eq "no") { + Fail-Json (New-Object psobject) "Destination $dest does not exist !"; + } + # Create new empty file, using the specified encoding to write correct BOM + [System.IO.File]::WriteAllLines($cleandest, "", $encodingobj); + } + + # Read the dest file lines using the indicated encoding into a mutable ArrayList. + $content = [System.IO.File]::ReadAllLines($cleandest, $encodingobj); + If ($content -eq $null) { + $lines = New-Object System.Collections.ArrayList; + } + Else { + $lines = [System.Collections.ArrayList] $content; + } + + # Compile the regex specified, if provided + $mre = $FALSE; + If ($regexp -ne $FALSE) { + $mre = New-Object Regex $regexp, 'Compiled'; + } + + # Compile the regex for insertafter or insertbefore, if provided + $insre = $FALSE; + + If ($insertafter -ne $FALSE -and $insertafter -ne "BOF" -and $insertafter -ne "EOF") { + $insre = New-Object Regex $insertafter, 'Compiled'; + } + ElseIf ($insertbefore -ne $FALSE -and $insertbefore -ne "BOF") { + $insre = New-Object Regex $insertbefore, 'Compiled'; + } + + # index[0] is the line num where regexp has been found + # index[1] is the line num where insertafter/inserbefore has been found + $index = -1, -1; + $lineno = 0; + + # The latest match object and matched line + $matched_line = ""; + $m = $FALSE; + + # Iterate through the lines in the file looking for matches + Foreach ($cur_line in $lines) { + If ($regexp -ne $FALSE) { + $m = $mre.Match($cur_line); + $match_found = $m.Success; + If ($match_found) { + $matched_line = $cur_line; + } + } + Else { + $match_found = $line -ceq $cur_line; + } + If ($match_found) { + $index[0] = $lineno; + } + ElseIf ($insre -ne $FALSE -and $insre.Match($cur_line).Success) { + If ($insertafter -ne $FALSE) { + $index[1] = $lineno + 1; + } + If ($insertbefore -ne $FALSE) { + $index[1] = $lineno; + } + } + $lineno = $lineno + 1; + } + + $changed = $FALSE; + $msg = ""; + + If ($index[0] -ne -1) { + If ($backrefs -ne "no") 
{ + $new_line = [regex]::Replace($matched_line, $regexp, $line); + } + Else { + $new_line = $line; + } + If ($lines[$index[0]] -cne $new_line) { + $lines[$index[0]] = $new_line; + $msg = "line replaced"; + $changed = $TRUE; + } + } + ElseIf ($backrefs -ne "no") { + # No matches - no-op + } + ElseIf ($insertbefore -eq "BOF" -or $insertafter -eq "BOF") { + $lines.Insert(0, $line); + $msg = "line added"; + $changed = $TRUE; + } + ElseIf ($insertafter -eq "EOF" -or $index[1] -eq -1) { + $lines.Add($line); + $msg = "line added"; + $changed = $TRUE; + } + Else { + $lines.Insert($index[1], $line); + $msg = "line added"; + $changed = $TRUE; + } + + # Write backup file if backup == "yes" + $backupdest = ""; + + If ($changed -eq $TRUE -and $backup -eq "yes") { + $backupdest = BackupFile $dest; + } + + # Write changes to the destination file if changes were made + If ($changed) { + WriteLines $lines $dest $linesep $encodingobj $validate; + } + + $encodingstr = $encodingobj.WebName; + + # Return result information + $result = New-Object psobject @{ + changed = $changed + msg = $msg + backup = $backupdest + encoding = $encodingstr + } + + Exit-Json $result; +} + + +# Implement the functionality for state == 'absent' + +function Absent($dest, $regexp, $line, $backup, $validate, $encodingobj, $linesep) { + + # Check if destination exists. If it does not exist, fail with a reasonable error message. + If (!(Test-Path $dest)) { + Fail-Json (New-Object psobject) "Destination $dest does not exist !"; + } + + # Read the dest file lines using the indicated encoding into a mutable ArrayList. Note + # that we have to clean up the dest path because ansible wants to treat / and \ as + # interchangeable in windows pathnames, but .NET framework internals do not support that. 
+ + $cleandest = $dest.Replace("/", "\"); + $content = [System.IO.File]::ReadAllLines($cleandest, $encodingobj); + If ($content -eq $null) { + $lines = New-Object System.Collections.ArrayList; + } + Else { + $lines = [System.Collections.ArrayList] $content; + } + + # Initialize message to be returned on success + $msg = ""; + + # Compile the regex specified, if provided + $cre = $FALSE; + If ($regexp -ne $FALSE) { + $cre = New-Object Regex $regexp, 'Compiled'; + } + + $found = New-Object System.Collections.ArrayList; + $left = New-Object System.Collections.ArrayList; + $changed = $FALSE; + + Foreach ($cur_line in $lines) { + If ($cre -ne $FALSE) { + $m = $cre.Match($cur_line); + $match_found = $m.Success; + } + Else { + $match_found = $line -ceq $cur_line; + } + If ($match_found) { + $found.Add($cur_line); + $changed = $TRUE; + } + Else { + $left.Add($cur_line); + } + } + + # Write backup file if backup == "yes" + $backupdest = ""; + + If ($changed -eq $TRUE -and $backup -eq "yes") { + $backupdest = BackupFile $dest; + } + + # Write changes to the destination file if changes were made + If ($changed) { + WriteLines $left $dest $linesep $encodingobj $validate; + } + + # Return result information + $fcount = $found.Count; + $msg = "$fcount line(s) removed"; + $encodingstr = $encodingobj.WebName; + + $result = New-Object psobject @{ + changed = $changed + msg = $msg + backup = $backupdest + found = $fcount + encoding = $encodingstr + } + + Exit-Json $result; +} + + +# Default to windows line separator - probably most common + +$linesep = "`r`n"; + +If ($newline -ne "windows") { + $linesep = "`n"; +} + + +# Fix any CR/LF literals in the line argument. PS will not recognize either backslash +# or backtick literals in the incoming string argument without this bit of black magic. + +If ($line -ne $FALSE) { + $line = $line.Replace("\r", "`r"); + $line = $line.Replace("\n", "`n"); +} + + +# Figure out the proper encoding to use for reading / writing the target file. 
+ +# The default encoding is UTF-8 without BOM +$encodingobj = [System.Text.UTF8Encoding] $FALSE; + +# If an explicit encoding is specified, use that instead +If ($encoding -ne "auto") { + $encodingobj = [System.Text.Encoding]::GetEncoding($encoding); +} + +# Otherwise see if we can determine the current encoding of the target file. +# If the file doesn't exist yet (create == 'yes') we use the default or +# explicitly specified encoding set above. +Elseif (Test-Path $dest) { + + # Get a sorted list of encodings with preambles, longest first + + $max_preamble_len = 0; + $sortedlist = New-Object System.Collections.SortedList; + Foreach ($encodinginfo in [System.Text.Encoding]::GetEncodings()) { + $encoding = $encodinginfo.GetEncoding(); + $plen = $encoding.GetPreamble().Length; + If ($plen -gt $max_preamble_len) { + $max_preamble_len = $plen; + } + If ($plen -gt 0) { + $sortedlist.Add(-($plen * 1000000 + $encoding.CodePage), $encoding); + } + } + + # Get the first N bytes from the file, where N is the max preamble length we saw + + [Byte[]]$bom = Get-Content -Encoding Byte -ReadCount $max_preamble_len -TotalCount $max_preamble_len -Path $dest; + + # Iterate through the sorted encodings, looking for a full match. + + $found = $FALSE; + Foreach ($encoding in $sortedlist.GetValueList()) { + $preamble = $encoding.GetPreamble(); + If ($preamble) { + Foreach ($i in 0..$preamble.Length) { + If ($preamble[$i] -ne $bom[$i]) { + break; + } + Elseif ($i + 1 -eq $preamble.Length) { + $encodingobj = $encoding; + $found = $TRUE; + } + } + If ($found) { + break; + } + } + } +} + + +# Main dispatch - based on the value of 'state', perform argument validation and +# call the appropriate handler function. 
+ +If ($state -eq "present") { + + If ( $backrefs -ne "no" -and $regexp -eq $FALSE ) { + Fail-Json (New-Object psobject) "regexp= is required with backrefs=true"; + } + + If ($line -eq $FALSE) { + Fail-Json (New-Object psobject) "line= is required with state=present"; + } + + If ($insertbefore -eq $FALSE -and $insertafter -eq $FALSE) { + $insertafter = "EOF"; + } + + Present $dest $regexp $line $insertafter $insertbefore $create $backup $backrefs $validate $encodingobj $linesep; + +} +Else { + + If ($regex -eq $FALSE -and $line -eq $FALSE) { + Fail-Json (New-Object psobject) "one of line= or regexp= is required with state=absent"; + } + + Absent $dest $regexp $line $backup $validate $encodingobj $linesep; +} + + + + + + + + + + + + + + + + + diff --git a/windows/win_lineinfile.py b/windows/win_lineinfile.py new file mode 100644 index 00000000000..6c54fd2bea8 --- /dev/null +++ b/windows/win_lineinfile.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +module: win_lineinfile +author: Brian Lloyd (brian.d.lloyd@gmail.com) +short_description: Ensure a particular line is in a file, or replace an + existing line using a back-referenced regular expression. +description: + - This module will search a file for a line, and ensure that it is present or absent. 
+ - This is primarily useful when you want to change a single line in + a file only. +version_added: "2.0" +options: + dest: + required: true + aliases: [ name, destfile ] + description: + - The path of the file to modify. + regexp: + required: false + description: + - The regular expression to look for in every line of the file. For + C(state=present), the pattern to replace if found; only the last line + found will be replaced. For C(state=absent), the pattern of the line + to remove. Uses .NET compatible regular expressions; see + U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx). + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the line should be there or not. + line: + required: false + description: + - Required for C(state=present). The line to insert/replace into the + file. If C(backrefs) is set, may contain backreferences that will get + expanded with the C(regexp) capture groups if the regexp matches. + backrefs: + required: false + default: "no" + choices: [ "yes", "no" ] + description: + - Used with C(state=present). If set, line can contain backreferences + (both positional and named) that will get populated if the C(regexp) + matches. This flag changes the operation of the module slightly; + C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) + doesn't match anywhere in the file, the file will be left unchanged. + If the C(regexp) does match, the last matching line will be replaced by + the expanded line parameter. + insertafter: + required: false + default: EOF + description: + - Used with C(state=present). If specified, the line will be inserted + after the last match of specified regular expression. A special value is + available; C(EOF) for inserting the line at the end of the file. + If specified regular expresion has no matches, EOF will be used instead. + May not be used with C(backrefs). 
+ choices: [ 'EOF', '*regex*' ] + insertbefore: + required: false + version_added: "1.1" + description: + - Used with C(state=present). If specified, the line will be inserted + before the last match of specified regular expression. A value is + available; C(BOF) for inserting the line at the beginning of the file. + If specified regular expresion has no matches, the line will be + inserted at the end of the file. May not be used with C(backrefs). + choices: [ 'BOF', '*regex*' ] + create: + required: false + choices: [ "yes", "no" ] + default: "no" + description: + - Used with C(state=present). If specified, the file will be created + if it does not already exist. By default it will fail if the file + is missing. + backup: + required: false + default: "no" + choices: [ "yes", "no" ] + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly. + validate: + required: false + description: + - validation to run before copying into place. + Use %s in the command to indicate the current file to validate. + The command is passed securely so shell features like + expansion and pipes won't work. + required: false + default: None + encoding: + required: false + default: "auto" + description: + - Specifies the encoding of the source text file to operate on (and thus what the + output encoding will be). The default of C(auto) will cause the module to auto-detect + the encoding of the source file and ensure that the modified file is written with the + same encoding. + An explicit encoding can be passed as a string that is a valid value to pass to + the .NET framework System.Text.Encoding.GetEncoding() method - see + U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx). + This is mostly useful with C(create=yes) if you want to create a new file with a specific + encoding. 
If C(create=yes) is specified without a specific encoding, the default encoding + (UTF-8, no BOM) will be used. + newline: + required: false + description: + - Specifies the line separator style to use for the modified file. This defaults + to the windows line separator (\r\n). Note that the indicated line separator + will be used for file output regardless of the original line seperator that + appears in the input file. + choices: [ "windows", "unix" ] + default: "windows" + +""" + +EXAMPLES = r""" +- win_lineinfile: dest=C:\\temp\\example.conf regexp=^name= line="name=JohnDoe" + +- win_lineinfile: dest=C:\\temp\\example.conf state=absent regexp="^name=" + +- win_lineinfile: dest=C:\\temp\\example.conf regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' + +- win_lineinfile: dest=C:\\temp\\httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080" + +- win_lineinfile: dest=C:\\temp\\services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default" + +# Create file if it doesnt exist with a specific encoding +- win_lineinfile: dest=C:\\temp\\utf16.txt create="yes" encoding="utf-16" line="This is a utf-16 encoded file" + +# Add a line to a file and ensure the resulting file uses unix line separators +- win_lineinfile: dest=C:\\temp\\testfile.txt line="Line added to file" newline="unix" + +""" From 653fd34ed7ed9c1af6908ce36a4996b7eb17bfe3 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Thu, 30 Jul 2015 09:02:18 +0530 Subject: [PATCH 279/386] Fix call to _expand_ppa --- packaging/os/apt_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 8f6d18d09d5..a72c0505717 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -390,7 +390,7 @@ class UbuntuSourcesList(SourcesList): continue if source_line.startswith('ppa:'): - source, ppa_owner, ppa_name = self._expand_ppa(i[3]) + source, ppa_owner, ppa_name 
= self._expand_ppa(source_line) _repositories.append(source) else: _repositories.append(source_line) From 6afa1da9103a4cfe70dc9c2ede87a2961b22b61b Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Thu, 30 Jul 2015 10:45:23 +0530 Subject: [PATCH 280/386] Clarify HAVE_PYTHON_APT/install_python_apt handling in apt_repository MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Don't test check_mode in both the caller and in the callee. 2. Don't test HAVE_PYTHON_APT inside an if that tests HAVE_PYTHON_APT 3. Don't be irritatingly vague about why the module fails ("You may be seeing this because…"). Note that if «apt-get -y install python-apt» succeeds with rc==0, but for some reason python_apt is not usable afterwards, this will break because the imports in install_python_apt aren't wrapped inside a try/except. In other words, we assume that install_python_apt either succeeds or fails with a traceback. This commit doesn't affect that behaviour. --- packaging/os/apt_repository.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index a72c0505717..aa7e8d07a60 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -423,24 +423,24 @@ def main(): ) params = module.params - if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode: - install_python_apt(module) - repo = module.params['repo'] state = module.params['state'] update_cache = module.params['update_cache'] sourceslist = None - if HAVE_PYTHON_APT: - if isinstance(distro, aptsources_distro.UbuntuDistribution): - sourceslist = UbuntuSourcesList(module, - add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) - elif HAVE_PYTHON_APT and \ - isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): - sourceslist = SourcesList() + if not 
HAVE_PYTHON_APT: + if params['install_python_apt']: + install_python_apt(module) + else: + module.fail_json(msg='python-apt is not installed, and install_python_apt is False') + + if isinstance(distro, aptsources_distro.UbuntuDistribution): + sourceslist = UbuntuSourcesList(module, + add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) + elif isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): + sourceslist = SourcesList() else: - module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu. ' + \ - 'You may be seeing this because python-apt is not installed, but you requested that it not be auto-installed') + module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.') sources_before = sourceslist.dump() From 44d16240a8134fbeb25f5598cdb6c6017bc27c37 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Thu, 30 Jul 2015 13:06:50 +0530 Subject: [PATCH 281/386] Make SourcesList __init__ method also set self.module This was originally required to allow other methods in SourcesList to fail, but subsequent changes rendered that unnecessary, and it's just a cleanup now, and avoids passing in module separately to save(). --- packaging/os/apt_repository.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index aa7e8d07a60..0e6140651e1 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -124,7 +124,8 @@ class InvalidSource(Exception): # Simple version of aptsources.sourceslist.SourcesList. # No advanced logic and no backups inside. 
class SourcesList(object): - def __init__(self): + def __init__(self, module): + self.module = module self.files = {} # group sources by file # Repositories that we're adding -- used to implement mode param self.new_repos = set() @@ -234,7 +235,7 @@ class SourcesList(object): group.append((n, valid, enabled, source, comment)) self.files[file] = group - def save(self, module): + def save(self): for filename, sources in self.files.items(): if sources: d, fn = os.path.split(filename) @@ -255,13 +256,13 @@ class SourcesList(object): try: f.write(line) except IOError, err: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) - module.atomic_move(tmp_path, filename) + self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) + self.module.atomic_move(tmp_path, filename) # allow the user to override the default mode if filename in self.new_repos: - this_mode = module.params['mode'] - module.set_mode_if_different(filename, this_mode, False) + this_mode = self.module.params['mode'] + self.module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): @@ -329,7 +330,7 @@ class UbuntuSourcesList(SourcesList): def __init__(self, module, add_ppa_signing_keys_callback=None): self.module = module self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback - super(UbuntuSourcesList, self).__init__() + super(UbuntuSourcesList, self).__init__(module) def _get_ppa_info(self, owner_name, ppa_name): lp_api = self.LP_API % (owner_name, ppa_name) @@ -438,7 +439,7 @@ def main(): sourceslist = UbuntuSourcesList(module, add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) elif isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): - sourceslist = SourcesList() + sourceslist = SourcesList(module) else: module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.') @@ -462,7 +463,7 
@@ def main(): if not module.check_mode and changed: try: - sourceslist.save(module) + sourceslist.save() if update_cache: cache = apt.Cache() cache.update() From 74a27ffe52368d2248bec56037f7f565d288d697 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Thu, 30 Jul 2015 13:16:11 +0530 Subject: [PATCH 282/386] Simplify distribution test If it's Ubuntu, use UbuntuSourcesList; if it's any other apt-friendly distribution, use SourcesList; otherwise, fail. --- packaging/os/apt_repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 0e6140651e1..71f78e2970c 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -438,7 +438,7 @@ def main(): if isinstance(distro, aptsources_distro.UbuntuDistribution): sourceslist = UbuntuSourcesList(module, add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) - elif isinstance(distro, aptsources_distro.DebianDistribution) or isinstance(distro, aptsources_distro.Distribution): + elif isinstance(distro, aptsources_distro.Distribution): sourceslist = SourcesList(module) else: module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.') From 2fdb197245ca8ae968b03595f994bb549da607f5 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Wed, 29 Jul 2015 17:49:37 +0200 Subject: [PATCH 283/386] fix error occurring with Debian Error was: AttributeError: 'SourcesList' object has no attribute 'repos_urls' --- packaging/os/apt_repository.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 71f78e2970c..750169325e3 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -360,6 +360,10 @@ class UbuntuSourcesList(SourcesList): if line.startswith('ppa:'): source, ppa_owner, ppa_name = self._expand_ppa(line) + if source in self.repos_urls: + # repository already 
exists + return + if self.add_ppa_signing_keys_callback is not None: info = self._get_ppa_info(ppa_owner, ppa_name) if not self._key_already_exists(info['signing_key_fingerprint']): @@ -445,13 +449,8 @@ def main(): sources_before = sourceslist.dump() - if repo.startswith('ppa:'): - expanded_repo = sourceslist._expand_ppa(repo)[0] - else: - expanded_repo = repo - try: - if state == 'present' and expanded_repo not in sourceslist.repos_urls: + if state == 'present': sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) From c6da9d0758f31a6befe38c99df1243414c225197 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Jul 2015 13:04:30 -0400 Subject: [PATCH 284/386] added missing version_Added to extra_hosts fixes #1843 --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 35c5b5be489..71358df54d3 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -284,6 +284,7 @@ options: default: 0 version_added: "1.9" extra_hosts: + version_added: "2.0" description: - Dict of custom host-to-IP mappings to be defined in the container insecure_registry: From 4c4e97836027c3db4620b96271b03c1a73be5dc2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 30 Jul 2015 13:46:42 -0400 Subject: [PATCH 285/386] Fixing s3 failures when bucket names contain dots --- cloud/amazon/s3.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index da19cf0316f..7aee8be54ef 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -438,6 +438,12 @@ def main(): if not s3_url and 'S3_URL' in os.environ: s3_url = os.environ['S3_URL'] + # bucket names with .'s in them need to use the calling_format option, + # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836 + # for more details. + if '.' 
in bucket: + aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat() + # Look at s3_url and tweak connection settings # if connecting to Walrus or fakes3 try: @@ -454,7 +460,7 @@ def main(): walrus = urlparse.urlparse(s3_url).hostname s3 = boto.connect_walrus(walrus, **aws_connect_kwargs) else: - s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs) + s3 = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_kwargs) # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases if s3 is None: s3 = boto.connect_s3(**aws_connect_kwargs) From 79f21c6ec984e7ef39cd27f839096eecb4e0c713 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 30 Jul 2015 14:49:16 -0400 Subject: [PATCH 286/386] Fix missing params to download_s3file in s3 --- cloud/amazon/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7aee8be54ef..5c97031c09c 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -492,7 +492,7 @@ def main(): # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download. pathrtn = path_check(dest) if pathrtn is False or overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. 
if pathrtn is True: From 1f7686fd312c5e0aa0ef86b3ad0bb796f61d1a03 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Jul 2015 17:04:41 -0400 Subject: [PATCH 287/386] returned version added --- commands/command.py | 1 + commands/raw.py | 1 + 2 files changed, 2 insertions(+) diff --git a/commands/command.py b/commands/command.py index 8107dd521e9..3fe16882c24 100644 --- a/commands/command.py +++ b/commands/command.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' --- module: command short_description: Executes a command on a remote node +version_added: historical description: - The M(command) module takes the command name followed by a list of space-delimited arguments. - The given command will be executed on all selected nodes. It will not be diff --git a/commands/raw.py b/commands/raw.py index 498ed3dabb0..8b9b796a6e7 100644 --- a/commands/raw.py +++ b/commands/raw.py @@ -19,6 +19,7 @@ DOCUMENTATION = ''' --- module: raw short_description: Executes a low-down and dirty SSH command +version_added: historical options: free_form: description: From bad636152ad261ccd2091d428bb9c814610c6f90 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Thu, 30 Jul 2015 15:48:34 -0400 Subject: [PATCH 288/386] Add check mode support for Windows modules that don't modify the remote system. 
--- windows/slurp.ps1 | 2 +- windows/win_ping.ps1 | 2 +- windows/win_stat.ps1 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/windows/slurp.ps1 b/windows/slurp.ps1 index edf1da7635f..073acddeade 100644 --- a/windows/slurp.ps1 +++ b/windows/slurp.ps1 @@ -17,7 +17,7 @@ # WANT_JSON # POWERSHELL_COMMON -$params = Parse-Args $args; +$params = Parse-Args $args $true; $src = Get-Attr $params "src" (Get-Attr $params "path" $FALSE); If (-not $src) diff --git a/windows/win_ping.ps1 b/windows/win_ping.ps1 index 98f1415e290..a4dd60ef19b 100644 --- a/windows/win_ping.ps1 +++ b/windows/win_ping.ps1 @@ -17,7 +17,7 @@ # WANT_JSON # POWERSHELL_COMMON -$params = Parse-Args $args; +$params = Parse-Args $args $true; $data = Get-Attr $params "data" "pong"; diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 51c9c827093..cf8c14a4d49 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -17,7 +17,7 @@ # WANT_JSON # POWERSHELL_COMMON -$params = Parse-Args $args; +$params = Parse-Args $args $true; function Date_To_Timestamp($start_date, $end_date) { From 4928357667c642510b4ad8f81209a7d6f063266a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Jul 2015 11:23:29 -0700 Subject: [PATCH 289/386] Remove unneeded urllib2 import --- cloud/amazon/_ec2_ami_search.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index ec9da6d4988..8ef0c0046ea 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -83,7 +83,6 @@ EXAMPLES = ''' import csv import json -import urllib2 import urlparse SUPPORTED_DISTROS = ['ubuntu'] From be468ff9d3ff99eee669defd88aaf0c5237ac99a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 31 Jul 2015 20:57:14 -0700 Subject: [PATCH 290/386] result is now a dict and so needs different access syntax Fixes #1848 --- cloud/amazon/ec2_lc.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git 
a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 179ef14d70f..60944d40c1f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -239,12 +239,13 @@ def create_launch_config(connection, module): module.fail_json(msg=str(e)) result = dict( - ((a[0], a[1]) for a in vars(launch_configs[0]) if a[0] not in ('connection',))) + ((a[0], a[1]) for a in vars(launch_configs[0]).items() if a[0] not in ('connection', 'created_time'))) + result['created_time'] = str(launch_configs[0].created_time) - module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), - image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, - instance_type=result.instance_type, + module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'], + image_id=result['image_id'], arn=result['launch_configuration_arn'], + security_groups=result['security_groups'], + instance_type=result['instance_type'], result=result) From a91db23b564df8a7d3024558b7f9799e4bc91494 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Tue, 30 Sep 2014 14:14:43 +0300 Subject: [PATCH 291/386] Add to the ec2_snapshot module the ability to create a snapshot only if one hasn't recently been created - Added snapshot_max_age parameter - Updated docs - Made the default value of wait to be false, as it used to be --- cloud/amazon/ec2_snapshot.py | 56 ++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index 41dc4bdb21a..d2847aa937f 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -74,6 +74,12 @@ options: - snapshot id to remove required: false version_added: "1.9" + snapshot_max_age: + description: + - If the volume's most recent snapshot has started less than `snapshot_max_age' minutes ago, a new snapshot will not be created. 
+ required: false + default: 0 + version_added: "1.9" author: "Will Thames (@willthames)" extends_documentation_fragment: aws @@ -104,6 +110,12 @@ EXAMPLES = ''' module: ec2_snapshot snapshot_id: snap-abcd1234 state: absent + +# Create a snapshot only if the most recent one is older than 1 hour +- local_action: + module: ec2_snapshot + volume_id: vol-abcdef12 + snapshot_max_age: 60 ''' import time @@ -124,8 +136,9 @@ def main(): instance_id = dict(), snapshot_id = dict(), device_name = dict(), - wait = dict(type='bool', default='true'), - wait_timeout = dict(default=0), + wait = dict(type='bool', default='false'), + wait_timeout = dict(type='int', default=0), + snapshot_max_age = dict(type='int', default=0), snapshot_tags = dict(type='dict', default=dict()), state = dict(choices=['absent','present'], default='present'), ) @@ -142,9 +155,13 @@ def main(): device_name = module.params.get('device_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') + snapshot_max_age = module.params.get('snapshot_max_age') snapshot_tags = module.params.get('snapshot_tags') state = module.params.get('state') + snapshot = None + changed = False + if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id: module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified') if instance_id and not device_name or device_name and not instance_id: @@ -175,10 +192,37 @@ def main(): else: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + if snapshot_max_age > 0: + try: + snapshot_max_age = snapshot_max_age * 60 # Convert to seconds + current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id}) + # Find the most recent snapshot + recent = dict(start_time=0, snapshot=None) + for s in current_snapshots: + start_time = time.mktime(time.strptime(s.start_time, '%Y-%m-%dT%H:%M:%S.000Z')) + if start_time > recent['start_time']: + recent['start_time'] = 
start_time + recent['snapshot'] = s + + # Adjust snapshot start time to local timezone + tz_adjustment = time.daylight and time.altzone or time.timezone + recent['start_time'] -= tz_adjustment + + # See if the snapshot is younger that the given max age + current_time = time.mktime(time.localtime()) + snapshot_age = current_time - recent['start_time'] + if snapshot_age < snapshot_max_age: + snapshot = recent['snapshot'] + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + try: - snapshot = ec2.create_snapshot(volume_id, description=description) - time_waited = 0 + # Create a new snapshot if we didn't find an existing one to use + if snapshot is None: + snapshot = ec2.create_snapshot(volume_id, description=description) + changed = True if wait: + time_waited = 0 snapshot.update() while snapshot.status != 'completed': time.sleep(3) @@ -189,9 +233,9 @@ def main(): for k, v in snapshot_tags.items(): snapshot.add_tag(k, v) except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) - module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, + module.exit_json(changed=changed, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, volume_size=snapshot.volume_size, tags=snapshot.tags.copy()) # import module snippets From 1f447ad5ba15f841473ada14cfbbac3ba6a06a11 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Thu, 12 Mar 2015 16:10:17 +0200 Subject: [PATCH 292/386] made `wait` default to True in ec2_snapshot --- cloud/amazon/ec2_snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index d2847aa937f..0727c6cf524 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -136,7 +136,7 @@ def main(): instance_id = dict(), snapshot_id = dict(), device_name = dict(), - 
wait = dict(type='bool', default='false'), + wait = dict(type='bool', default=True), wait_timeout = dict(type='int', default=0), snapshot_max_age = dict(type='int', default=0), snapshot_tags = dict(type='dict', default=dict()), From d10f3f7a7e337059665a6357d73a3a4872f5bac7 Mon Sep 17 00:00:00 2001 From: Robert Jailall Date: Wed, 15 Apr 2015 13:56:33 -0400 Subject: [PATCH 293/386] Refactor ec2_snapshot to make it more testable --- cloud/amazon/ec2_snapshot.py | 236 ++++++++++++++++++++++------------- 1 file changed, 151 insertions(+), 85 deletions(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index 0727c6cf524..29fd559bea5 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -74,9 +74,9 @@ options: - snapshot id to remove required: false version_added: "1.9" - snapshot_max_age: + last_snapshot_min_age: description: - - If the volume's most recent snapshot has started less than `snapshot_max_age' minutes ago, a new snapshot will not be created. + - If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created. 
required: false default: 0 version_added: "1.9" @@ -88,7 +88,7 @@ extends_documentation_fragment: aws EXAMPLES = ''' # Simple snapshot of volume using volume_id - ec2_snapshot: - volume_id: vol-abcdef12 + volume_id: vol-abcdef12 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 # Snapshot of volume mounted on device_name attached to instance_id @@ -115,10 +115,11 @@ EXAMPLES = ''' - local_action: module: ec2_snapshot volume_id: vol-abcdef12 - snapshot_max_age: 60 -''' + last_snapshot_min_age: 60 +''' import time +import datetime try: import boto.ec2 @@ -127,7 +128,128 @@ except ImportError: HAS_BOTO = False -def main(): +# Find the most recent snapshot +def _get_snapshot_starttime(snap): + return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z') + + +def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): + """ + Gets the most recently created snapshot and optionally filters the result + if the snapshot is too old + :param snapshots: list of snapshots to search + :param max_snapshot_age_secs: filter the result if its older than this + :param now: simulate time -- used for unit testing + :return: + """ + if len(snapshots) == 0: + return None + + if not now: + now = datetime.datetime.utcnow() + + youngest_snapshot = min(snapshots, key=_get_snapshot_starttime) + + # See if the snapshot is younger that the given max age + snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z') + snapshot_age = now - snapshot_start + + if max_snapshot_age_secs is not None: + if snapshot_age.total_seconds() > max_snapshot_age_secs: + return None + + return youngest_snapshot + + +def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep): + """ + Wait for the snapshot to be created + :param snapshot: + :param wait_timeout_secs: fail this step after this many seconds + :param sleep_func: + :return: + """ + time_waited = 0 + snapshot.update() + while snapshot.status != 
'completed': + sleep_func(3) + snapshot.update() + time_waited += 3 + if wait_timeout_secs and time_waited > wait_timeout_secs: + return False + return True + + +def create_snapshot(module, ec2, state=None, description=None, wait=None, + wait_timeout=None, volume_id=None, instance_id=None, + snapshot_id=None, device_name=None, snapshot_tags=None, + last_snapshot_min_age=None): + snapshot = None + changed = False + + required = [volume_id, snapshot_id, instance_id] + if required.count(None) != len(required) - 1: # only 1 must be set + module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified') + if instance_id and not device_name or device_name and not instance_id: + module.fail_json(msg='Instance ID and device name must both be specified') + + if instance_id: + try: + volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name}) + except boto.exception.BotoServerError, e: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + + if not volumes: + module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id)) + + volume_id = volumes[0].id + + if state == 'absent': + if not snapshot_id: + module.fail_json(msg = 'snapshot_id must be set when state is absent') + try: + ec2.delete_snapshot(snapshot_id) + except boto.exception.BotoServerError, e: + # exception is raised if snapshot does not exist + if e.error_code == 'InvalidSnapshot.NotFound': + module.exit_json(changed=False) + else: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + + # successful delete + module.exit_json(changed=True) + + if last_snapshot_min_age > 0: + try: + current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id}) + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + + last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds + 
snapshot = _get_most_recent_snapshot(current_snapshots, + max_snapshot_age_secs=last_snapshot_min_age) + try: + # Create a new snapshot if we didn't find an existing one to use + if snapshot is None: + snapshot = ec2.create_snapshot(volume_id, description=description) + changed = True + if wait: + if not _create_with_wait(snapshot, wait_timeout): + module.fail_json(msg='Timed out while creating snapshot.') + if snapshot_tags: + for k, v in snapshot_tags.items(): + snapshot.add_tag(k, v) + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + + module.exit_json(changed=changed, + snapshot_id=snapshot.id, + volume_id=snapshot.volume_id, + volume_size=snapshot.volume_size, + tags=snapshot.tags.copy()) + + +def create_snapshot_ansible_module(): argument_spec = ec2_argument_spec() argument_spec.update( dict( @@ -138,12 +260,17 @@ def main(): device_name = dict(), wait = dict(type='bool', default=True), wait_timeout = dict(type='int', default=0), - snapshot_max_age = dict(type='int', default=0), + last_snapshot_min_age = dict(type='int', default=0), snapshot_tags = dict(type='dict', default=dict()), state = dict(choices=['absent','present'], default='present'), ) ) module = AnsibleModule(argument_spec=argument_spec) + return module + + +def main(): + module = create_snapshot_ansible_module() if not HAS_BOTO: module.fail_json(msg='boto required for this module') @@ -155,91 +282,30 @@ def main(): device_name = module.params.get('device_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') - snapshot_max_age = module.params.get('snapshot_max_age') + last_snapshot_min_age = module.params.get('last_snapshot_min_age') snapshot_tags = module.params.get('snapshot_tags') state = module.params.get('state') - snapshot = None - changed = False - - if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id: - module.fail_json(msg='One and only one of 
volume_id or instance_id or snapshot_id must be specified') - if instance_id and not device_name or device_name and not instance_id: - module.fail_json(msg='Instance ID and device name must both be specified') - ec2 = ec2_connect(module) - if instance_id: - try: - volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name}) - if not volumes: - module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id)) - volume_id = volumes[0].id - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - if state == 'absent': - if not snapshot_id: - module.fail_json(msg = 'snapshot_id must be set when state is absent') - try: - snapshots = ec2.get_all_snapshots([snapshot_id]) - ec2.delete_snapshot(snapshot_id) - module.exit_json(changed=True) - except boto.exception.BotoServerError, e: - # exception is raised if snapshot does not exist - if e.error_code == 'InvalidSnapshot.NotFound': - module.exit_json(changed=False) - else: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - - if snapshot_max_age > 0: - try: - snapshot_max_age = snapshot_max_age * 60 # Convert to seconds - current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id}) - # Find the most recent snapshot - recent = dict(start_time=0, snapshot=None) - for s in current_snapshots: - start_time = time.mktime(time.strptime(s.start_time, '%Y-%m-%dT%H:%M:%S.000Z')) - if start_time > recent['start_time']: - recent['start_time'] = start_time - recent['snapshot'] = s - - # Adjust snapshot start time to local timezone - tz_adjustment = time.daylight and time.altzone or time.timezone - recent['start_time'] -= tz_adjustment - - # See if the snapshot is younger that the given max age - current_time = time.mktime(time.localtime()) - snapshot_age = current_time - recent['start_time'] - if snapshot_age < snapshot_max_age: - snapshot = 
recent['snapshot'] - except boto.exception.BotoServerError, e: - module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) - - try: - # Create a new snapshot if we didn't find an existing one to use - if snapshot is None: - snapshot = ec2.create_snapshot(volume_id, description=description) - changed = True - if wait: - time_waited = 0 - snapshot.update() - while snapshot.status != 'completed': - time.sleep(3) - snapshot.update() - time_waited += 3 - if wait_timeout and time_waited > wait_timeout: - module.fail_json('Timed out while creating snapshot.') - for k, v in snapshot_tags.items(): - snapshot.add_tag(k, v) - except boto.exception.BotoServerError, e: - module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) - - module.exit_json(changed=changed, snapshot_id=snapshot.id, volume_id=snapshot.volume_id, - volume_size=snapshot.volume_size, tags=snapshot.tags.copy()) + create_snapshot( + module=module, + state=state, + description=description, + wait=wait, + wait_timeout=wait_timeout, + ec2=ec2, + volume_id=volume_id, + instance_id=instance_id, + snapshot_id=snapshot_id, + device_name=device_name, + snapshot_tags=snapshot_tags, + last_snapshot_min_age=last_snapshot_min_age + ) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() From 40519de8e7116b8076049eadcc80020811474cbd Mon Sep 17 00:00:00 2001 From: Ted Timmons Date: Sat, 1 Aug 2015 17:30:15 -0700 Subject: [PATCH 294/386] fix ugly documentation current version dumps a character per line in the docs: http://docs.ansible.com/ansible/cloudformation_module.html --- cloud/amazon/cloudformation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index e4ac80fcf5a..236bc89000d 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -79,7 +79,8 @@ options: required: false version_added: "2.0" 
template_format: - description: For local templates, allows specification of json or yaml format + description: + - For local templates, allows specification of json or yaml format default: json choices: [ json, yaml ] required: false From 2c35cfce9a9412b288af136bd75a8c6f7491332b Mon Sep 17 00:00:00 2001 From: Sam Yaple Date: Mon, 3 Aug 2015 10:49:37 +0000 Subject: [PATCH 295/386] Revert "escapeds changes" While this change doesn't break the creation, it does break idempotency. This change will convert '*.*' to '`*`.*' which is functionally the same, however when the user_mod() function looks up the current privileges with privileges_get() it will read '*.*' Since '*.*' != '`*`.*' it will go through the process of updating the privileges always resulting in a 'changed' result. This reverts commit db9ab9b2629f00350a743a4eca72fb5ee8dc8c77. --- database/mysql/mysql_user.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 7c72546706a..4f0dee5374c 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -320,8 +320,13 @@ def privileges_unpack(priv): privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') - dbpriv = pieces[0].rsplit(".", 1) - pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) + if '.' 
in pieces[0]: + pieces[0] = pieces[0].split('.') + for idx, piece in enumerate(pieces): + if pieces[0][idx] != "*": + pieces[0][idx] = "`" + pieces[0][idx] + "`" + pieces[0] = '.'.join(pieces[0]) + if '(' in pieces[1]: output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) for i in output[pieces[0]]: From 1001eae3c8621b85721c2b8378cdcba533570648 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 3 Aug 2015 08:44:48 -0700 Subject: [PATCH 296/386] Set a default value for DEFAULT_DOCKER_API_VERSION so we can create the AnsibleModule without docker-py --- cloud/docker/docker_image.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 09fc61e6b08..e6cfd87ab43 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -137,6 +137,7 @@ try: except ImportError: HAS_DOCKER_CLIENT = False +DEFAULT_DOCKER_API_VERSION = None if HAS_DOCKER_CLIENT: try: from docker.errors import APIError as DockerAPIError From 8ad072c96fc8125d5142293c4f2eb451edb23b24 Mon Sep 17 00:00:00 2001 From: Shobhit Srivastava Date: Tue, 4 Aug 2015 12:52:56 +0530 Subject: [PATCH 297/386] checking remote_group_id while comparing os_security_group_rule --- cloud/openstack/os_security_group_rule.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 91059aca015..b2324b097ce 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -213,12 +213,14 @@ def _find_matching_rule(module, secgroup): remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] + remote_group_id = module.params['remote_group'] for rule in secgroup['security_group_rules']: if (protocol == rule['protocol'] and remote_ip_prefix == rule['remote_ip_prefix'] and ethertype == rule['ethertype'] and direction == 
rule['direction'] + and remote_group_id == rule['remote_group_id'] and _ports_match(protocol, module.params['port_range_min'], module.params['port_range_max'], From 06e722900e9c487da22074b10c2b1387993a9826 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Wed, 5 Aug 2015 14:22:09 -0400 Subject: [PATCH 298/386] Adding the ability to associate eips with network interfaces --- cloud/amazon/ec2_eip.py | 151 +++++++++++++++++++++++++++------------- 1 file changed, 103 insertions(+), 48 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index a564612e220..6fc1360d3fe 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -26,6 +26,11 @@ options: description: - The EC2 instance id required: false + network_interface_id: + description: + - The Elastic Network Interface (ENI) id + required: false + version_added: "2.0" public_ip: description: - The elastic IP address to associate with the instance. @@ -57,7 +62,6 @@ options: required: false default: false version_added: "1.6" - extends_documentation_fragment: aws author: "Lorin Hochstein (@lorin) " notes: @@ -72,22 +76,21 @@ notes: EXAMPLES = ''' - name: associate an elastic IP with an instance ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 - +- name: associate an elastic IP with a device + ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 - name: disassociate an elastic IP from an instance ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent - +- name: disassociate an elastic IP with a device + ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 state=absent - name: allocate a new elastic IP and associate it with an instance ec2_eip: instance_id=i-1212f003 - - name: allocate a new elastic IP without associating it to anything action: ec2_eip register: eip - name: output the IP debug: msg="Allocated IP is {{ eip.public_ip }}" - - name: another way of allocating an elastic IP without associating it to anything ec2_eip: state='present' - - name: provision 
new instances with ec2 ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes''' ''' group=webserver count=3 @@ -95,7 +98,6 @@ EXAMPLES = ''' - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" with_items: ec2.instance_ids - - name: allocate a new elastic IP inside a VPC in us-west-2 ec2_eip: region=us-west-2 in_vpc=yes register: eip @@ -113,27 +115,27 @@ except ImportError: class EIPException(Exception): pass - -def associate_ip_and_instance(ec2, address, instance_id, check_mode): - if address_is_associated_with_instance(ec2, address, instance_id): +def associate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True): + if address_is_associated_with_device(ec2, address, device_id, isinstance): return {'changed': False} # If we're in check mode, nothing else to do if not check_mode: - if address.domain == 'vpc': - res = ec2.associate_address(instance_id, - allocation_id=address.allocation_id) + if isinstance: + if address.domain == "vpc": + res = ec2.associate_address(device_id, allocation_id=address.allocation_id) + else: + res = ec2.associate_address(device_id, public_ip=address.public_ip) else: - res = ec2.associate_address(instance_id, - public_ip=address.public_ip) + res = ec2.associate_address(network_interface_id=device_id, allocation_id=address.allocation_id) if not res: raise EIPException('association failed') return {'changed': True} -def disassociate_ip_and_instance(ec2, address, instance_id, check_mode): - if not address_is_associated_with_instance(ec2, address, instance_id): +def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True): + if not address_is_associated_with_device(ec2, address, device_id, isinstance): return {'changed': False} # If we're in check mode, nothing else to do @@ -158,24 +160,33 @@ def _find_address_by_ip(ec2, public_ip): raise -def _find_address_by_instance_id(ec2, instance_id): - addresses = ec2.get_all_addresses(None, 
{'instance-id': instance_id}) +def _find_address_by_device_id(ec2, device_id, isinstance=True): + if isinstance: + addresses = ec2.get_all_addresses(None, {'instance-id': device_id}) + else: + addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id}) if addresses: return addresses[0] -def find_address(ec2, public_ip, instance_id): +def find_address(ec2, public_ip, device_id, isinstance=True): """ Find an existing Elastic IP address """ if public_ip: return _find_address_by_ip(ec2, public_ip) - elif instance_id: - return _find_address_by_instance_id(ec2, instance_id) + elif device_id and isinstance: + return _find_address_by_device_id(ec2, device_id) + elif device_id: + return _find_address_by_device_id(ec2, device_id, isinstance=False) -def address_is_associated_with_instance(ec2, address, instance_id): - """ Check if the elastic IP is currently associated with the instance """ +def address_is_associated_with_device(ec2, address, device_id, isinstance=True): + """ Check if the elastic IP is currently associated with the device """ + address = ec2.get_all_addresses(address.public_ip) if address: - return address and address.instance_id == instance_id + if isinstance: + return address and address[0].instance_id == device_id + else: + return address and address[0].network_interface_id == device_id return False @@ -186,7 +197,7 @@ def allocate_address(ec2, domain, reuse_existing_ip_allowed): all_addresses = ec2.get_all_addresses(filters=domain_filter) unassociated_addresses = [a for a in all_addresses - if not a.instance_id] + if not a.device_id] if unassociated_addresses: return unassociated_addresses[0] @@ -204,21 +215,33 @@ def release_address(ec2, address, check_mode): return {'changed': True} -def find_instance(ec2, instance_id): +def find_device(ec2, device_id, isinstance=True): """ Attempt to find the EC2 instance and return it """ - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) + if isinstance: + try: + reservations = 
ec2.get_all_reservations(instance_ids=[device_id]) + except boto.exception.EC2ResponseError, e: + module.fail_json(msg=str(e)) - if len(reservations) == 1: - instances = reservations[0].instances - if len(instances) == 1: - return instances[0] + if len(reservations) == 1: + instances = reservations[0].instances + if len(instances) == 1: + return instances[0] + else: + try: + interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id]) + except boto.exception.EC2ResponseError, e: + module.fail_json(msg=str(e)) - raise EIPException("could not find instance" + instance_id) + if len(interfaces) == 1: + return interfaces[0] + + raise EIPException("could not find instance" + device_id) -def ensure_present(ec2, domain, address, instance_id, - reuse_existing_ip_allowed, check_mode): +def ensure_present(ec2, domain, address, device_id, + reuse_existing_ip_allowed, check_mode, isinstance=True): changed = False # Return the EIP object since we've been given a public IP @@ -229,28 +252,39 @@ def ensure_present(ec2, domain, address, instance_id, address = allocate_address(ec2, domain, reuse_existing_ip_allowed) changed = True - if instance_id: + if device_id: # Allocate an IP for instance since no public_ip was provided - instance = find_instance(ec2, instance_id) + if isinstance: + instance = find_device(ec2, device_id) + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_device(ec2, address, device_id, + check_mode) + else: + instance = find_device(ec2, device_id, isinstance=False) + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_device(ec2, address, device_id, + check_mode, isinstance=False) + if instance.vpc_id: domain = 'vpc' - # Associate address object (provided or allocated) with instance - assoc_result = associate_ip_and_instance(ec2, address, instance_id, - check_mode) changed = changed or assoc_result['changed'] return {'changed': changed, 
'public_ip': address.public_ip} -def ensure_absent(ec2, domain, address, instance_id, check_mode): +def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True): if not address: return {'changed': False} # disassociating address from instance - if instance_id: - return disassociate_ip_and_instance(ec2, address, instance_id, - check_mode) + if device_id: + if isinstance: + return disassociate_ip_and_device(ec2, address, device_id, + check_mode) + else: + return disassociate_ip_and_device(ec2, address, device_id, + check_mode, isinstance=False) # releasing address else: return release_address(ec2, address, check_mode) @@ -260,6 +294,7 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance_id=dict(required=False), + network_interface_id=dict(required=False), public_ip=dict(required=False, aliases=['ip']), state=dict(required=False, default='present', choices=['present', 'absent']), @@ -280,6 +315,7 @@ def main(): ec2 = ec2_connect(module) instance_id = module.params.get('instance_id') + network_interface_id = module.params.get('network_interface_id') public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') @@ -287,20 +323,39 @@ def main(): reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') try: - address = find_address(ec2, public_ip, instance_id) + if network_interface_id: + address = find_address(ec2, public_ip, network_interface_id, isinstance=False) + elif instance_id: + address = find_address(ec2, public_ip, instance_id) + else: + address = False if state == 'present': - result = ensure_present(ec2, domain, address, instance_id, + if instance_id: + result = ensure_present(ec2, domain, address, instance_id, reuse_existing_ip_allowed, module.check_mode) + elif network_interface_id: + result = ensure_present(ec2, domain, address, network_interface_id, + reuse_existing_ip_allowed, + module.check_mode, isinstance=False) + else: + address 
= allocate_address(ec2, domain, reuse_existing_ip_allowed) + result = {'changed': True, 'public_ip': address.public_ip} else: - result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + if network_interface_id: + result = ensure_absent(ec2, domain, address, network_interface_id, module.check_mode, isinstance=False) + elif instance_id: + result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + else: + address = find_address(ec2, public_ip, None) + result = release_address(ec2, address, module.check_mode) + except (boto.exception.EC2ResponseError, EIPException) as e: module.fail_json(msg=str(e)) module.exit_json(**result) - # import module snippets from ansible.module_utils.basic import * # noqa from ansible.module_utils.ec2 import * # noqa From 0071fd0bf72380bc60cfd79a4b51af5ec7307a02 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 5 Aug 2015 11:54:12 -0700 Subject: [PATCH 299/386] Allow mode as a synchronize parameter (handled in action plugin) --- files/synchronize.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/synchronize.py b/files/synchronize.py index 8266ece7b36..ee815acc561 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -252,6 +252,7 @@ def main(): ssh_args = dict(type='str'), partial = dict(default='no', type='bool'), verify_host = dict(default='no', type='bool'), + mode = dict(default='push', choices=['push', 'pull']), ), supports_check_mode = True ) From ed2d8121b3799c6640f8eb9c8c68ed85d22dcfda Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 5 Aug 2015 12:46:01 -0700 Subject: [PATCH 300/386] Update docs to reflect precedence for synchronize's dest_port param --- files/synchronize.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/synchronize.py b/files/synchronize.py index ee815acc561..3b73a3e16b7 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -34,8 +34,8 @@ options: required: true dest_port: description: - - Port number 
for ssh on the destination host. The ansible_ssh_port inventory var takes precedence over this value. - default: 22 + - Port number for ssh on the destination host. Prior to ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. + default: Value of ansible_ssh_port for this host or 22 if that is not set version_added: "1.5" mode: description: From f39e5ffa1d7e157f71bbea15f349097f0ff07822 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 5 Aug 2015 13:05:59 -0700 Subject: [PATCH 301/386] Modify the default for synchronize's dest_port again --- files/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 3b73a3e16b7..1b9d4326fb5 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -35,7 +35,7 @@ options: dest_port: description: - Port number for ssh on the destination host. Prior to ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. - default: Value of ansible_ssh_port for this host or 22 if that is not set + default: Value of ansible_ssh_port for this host, remote_port config setting, or 22 if none of those are set version_added: "1.5" mode: description: From 016b4ede2efdf75dd4b306d1325829d2d2b9dd03 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 5 Aug 2015 17:35:59 -0400 Subject: [PATCH 302/386] minor doc fix --- cloud/amazon/ec2_eip.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index a564612e220..8c9dfc76623 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -52,8 +52,7 @@ options: version_added: "1.4" reuse_existing_ip_allowed: description: - - Reuse an EIP that is not associated to an instance (when available),''' -''' instead of allocating a new one. + - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. 
required: false default: false version_added: "1.6" From 599d60efc08d8b04a8e66b6b5c438354ae9a6a00 Mon Sep 17 00:00:00 2001 From: sebi Date: Thu, 6 Aug 2015 16:46:01 +0300 Subject: [PATCH 303/386] mem_limit client version check --- cloud/docker/docker.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 71358df54d3..1cf85843e0e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1404,12 +1404,12 @@ class DockerManager(object): mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) except ValueError as e: self.module.fail_json(msg=str(e)) + api_version = self.client.version()['ApiVersion'] params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, - 'mem_limit': mem_limit, 'environment': self.env, 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), @@ -1421,10 +1421,16 @@ class DockerManager(object): 'host_config': self.create_host_config(), 'user': self.module.params.get('docker_user'), } - if self.ensure_capability('host_config', fail=False): params['host_config'] = self.get_host_config() + #For v1.19 API and above use HostConfig, otherwise use Config + if api_version < 1.19: + params['mem_limit'] = mem_limit + else: + params['host_config']['mem_limit'] = mem_limit + + def do_create(count, params): results = [] for _ in range(count): From 4c622a03381453991b16bc472ca28209b464146d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 6 Aug 2015 08:41:23 -0700 Subject: [PATCH 304/386] Another launchconfig field needs to be explicitly converted for json serialization Fixes #1848 --- cloud/amazon/ec2_lc.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 60944d40c1f..5891ebeadbf 100644 --- a/cloud/amazon/ec2_lc.py +++ 
b/cloud/amazon/ec2_lc.py @@ -77,7 +77,7 @@ options: - Kernel id for the EC2 instance required: false default: null - aliases: [] + aliases: [] spot_price: description: - The spot price you are bidding. Only applies for an autoscaling group with spot instances. @@ -239,8 +239,22 @@ def create_launch_config(connection, module): module.fail_json(msg=str(e)) result = dict( - ((a[0], a[1]) for a in vars(launch_configs[0]).items() if a[0] not in ('connection', 'created_time'))) + ((a[0], a[1]) for a in vars(launch_configs[0]).items() + if a[0] not in ('connection', 'created_time', 'instance_monitoring')) + ) result['created_time'] = str(launch_configs[0].created_time) + # Looking at boto's launchconfig.py, it looks like this could be a boolean + # value or an object with an enabled attribute. The enabled attribute + # could be a boolean or a string representation of a boolean. Since + # I can't test all permutations myself to see if my reading of the code is + # correct, have to code this *very* defensively + if launch_configs[0].instance_monitoring is True: + result['instance_monitoring'] = True + else: + try: + result['instance_monitoring'] = module.boolean(launch_configs[0].instance_monitoring.enabled) + except AttributeError: + result['instance_monitoring'] = False module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'], image_id=result['image_id'], arn=result['launch_configuration_arn'], From 4f0cf6d2ca0b46d289f631496af373fac02adf1c Mon Sep 17 00:00:00 2001 From: muffl0n Date: Mon, 6 Oct 2014 09:53:35 +0200 Subject: [PATCH 305/386] Allow additional hashing algorithms. Directly use hashlib and check if used algorithm is supported. 
--- network/basics/get_url.py | 73 ++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 5e39887df7f..f5c14812b29 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -72,9 +72,22 @@ options: - If a SHA-256 checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. + This option is deprecated. Use 'checksum'. version_added: "1.3" required: false default: null + checksum: + description: + - If a checksum is passed to this parameter, the digest of the + destination file will be calculated after it is downloaded to ensure + its integrity and verify that the transfer completed successfully. + Format: <algorithm>:<checksum>, e.g.: checksum="sha256:d98291acbedd510e3dbd36dbfdd83cbca8415220af43b327c0a0c574b6dc7b97" + If you worry about portability, only the sha1 algorithm is available + on all platforms and python versions. The third party hashlib + library can be installed for access to additional algorithms. 
+ version_added: "2.0" + required: false + default: null use_proxy: description: - if C(no), it will not use a proxy, even if one is defined in @@ -136,24 +149,19 @@ EXAMPLES=''' - name: download foo.conf get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440 -- name: download file with sha256 check - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c - - name: download file and force basic auth get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes - name: download file with custom HTTP headers get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers: 'key:value,key:value' + +- name: download file with check + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=md5:66dffb5228a211e61d6d7ef4a86f5758 ''' import urlparse -try: - import hashlib - HAS_HASHLIB=True -except ImportError: - HAS_HASHLIB=False - # ============================================================== # url handling @@ -209,6 +217,7 @@ def extract_filename_from_headers(headers): return res + # ============================================================== # main @@ -219,6 +228,7 @@ def main(): url = dict(required=True), dest = dict(required=True), sha256sum = dict(default=''), + checksum = dict(default=''), timeout = dict(required=False, type='int', default=10), headers = dict(required=False, default=None), ) @@ -233,6 +243,7 @@ def main(): dest = os.path.expanduser(module.params['dest']) force = module.params['force'] sha256sum = module.params['sha256sum'] + checksum = module.params['checksum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] @@ -248,28 +259,37 @@ def main(): dest_is_dir = os.path.isdir(dest) last_mod_time = None - # Remove any 
non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) + # workaround for usage of deprecated sha256sum parameter + if sha256sum != '': + checksum = 'sha256:%s' % (sha256sum) + + # checksum specified, parse for algorithm and checksum + if checksum != '': + try: + algorithm, checksum = checksum.rsplit(':', 1) + # Remove any non-alphanumeric characters, including the infamous + # Unicode zero-width space + checksum = re.sub(r'\W+', '', checksum).lower() + # Ensure the checksum portion is a hexdigest + int(checksum, 16) + except ValueError: + module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>") - # Fail early if sha256 is not supported - if sha256sum != '' and not HAS_HASHLIB: - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") if not dest_is_dir and os.path.exists(dest): checksum_mismatch = False # If the download is not forced and there is a checksum, allow # checksum match to skip the download. 
- if not force and sha256sum != '': - destination_checksum = module.sha256(dest) + if not force and checksum != '': + destination_checksum = module.digest_from_file(dest, algorithm) - if stripped_sha256sum.lower() == destination_checksum: + if checksum == destination_checksum: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) checksum_mismatch = True - # Not forcing redownload, unless sha256sum has already failed + # Not forcing redownload, unless checksum does not match if not force and not checksum_mismatch: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) @@ -330,14 +350,12 @@ def main(): else: changed = False - # Check the digest of the destination file and ensure that it matches the - # sha256sum parameter if it is present - if sha256sum != '': - destination_checksum = module.sha256(dest) + if checksum != '': + destination_checksum = module.digest_from_file(dest, algorithm) - if stripped_sha256sum.lower() != destination_checksum: + if checksum != destination_checksum: os.remove(dest) - module.fail_json(msg="The SHA-256 checksum for %s did not match %s; it was %s." % (dest, sha256sum, destination_checksum)) + module.fail_json(msg="The checksum for %s did not match %s; it was %s." 
% (dest, checksum, destination_checksum)) os.remove(tmpsrc) @@ -354,9 +372,8 @@ def main(): md5sum = None # Mission complete - - module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src, - sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) + module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum_src=checksum_src, + checksum_dest=checksum_dest, changed=changed, msg=info.get('msg', '')) # import module snippets from ansible.module_utils.basic import * From 8412adc4f93b5d3b87368a50306f52aeb4844b3f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 6 Aug 2015 21:10:03 -0700 Subject: [PATCH 306/386] Quote checksum description to fix docs --- network/basics/get_url.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index f5c14812b29..db3c73b2cd7 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -78,13 +78,13 @@ options: default: null checksum: description: - - If a checksum is passed to this parameter, the digest of the + - 'If a checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. Format: <algorithm>:<checksum>, e.g.: checksum="sha256:d98291acbedd510e3dbd36dbfdd83cbca8415220af43b327c0a0c574b6dc7b97" If you worry about portability, only the sha1 algorithm is available on all platforms and python versions. The third party hashlib - library can be installed for access to additional algorithms. + library can be installed for access to additional algorithms.'
version_added: "2.0" required: false default: null From dea5f5d15eb907525f30a1de40d1aa9a4c7e3a96 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 7 Aug 2015 12:27:57 -0700 Subject: [PATCH 307/386] Another way that the serialization of the boto results can fail --- cloud/amazon/ec2_lc.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 5891ebeadbf..e0d7d2c1a64 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -240,7 +240,7 @@ def create_launch_config(connection, module): result = dict( ((a[0], a[1]) for a in vars(launch_configs[0]).items() - if a[0] not in ('connection', 'created_time', 'instance_monitoring')) + if a[0] not in ('connection', 'created_time', 'instance_monitoring', 'block_device_mappings')) ) result['created_time'] = str(launch_configs[0].created_time) # Looking at boto's launchconfig.py, it looks like this could be a boolean @@ -255,6 +255,13 @@ def create_launch_config(connection, module): result['instance_monitoring'] = module.boolean(launch_configs[0].instance_monitoring.enabled) except AttributeError: result['instance_monitoring'] = False + if launch_configs[0].block_device_mappings is not None: + result['block_device_mappings'] = [] + for bdm in launch_configs[0].block_device_mappings: + result['block_device_mappings'].append(dict(device_name=bdm.device_name, virtual_name=bdm.virtual_name)) + if bdm.ebs is not None: + result['block_device_mappings'][-1]['ebs'] = dict(snapshot_id=bdm.ebs.snapshot_id, volume_size=bdm.ebs.volume_size) + module.exit_json(changed=changed, name=result['name'], created_time=result['created_time'], image_id=result['image_id'], arn=result['launch_configuration_arn'], From 7f01246bb574ce4f407f4a803f4e498a5b37baa7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 10 Aug 2015 10:11:40 -0400 Subject: [PATCH 308/386] updated win_file version_added fixes #1893 --- windows/win_file.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/windows/win_file.py b/windows/win_file.py index 062b4bfe92e..895da567d86 100644 --- a/windows/win_file.py +++ b/windows/win_file.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: win_file -version_added: "1.8" +version_added: "1.9.2" short_description: Creates, touches or removes files or directories. description: - Creates (empty) files, updates file modification stamps of existing files, From 5c250578684676e5f2117c1d832c5b3b0aeaad12 Mon Sep 17 00:00:00 2001 From: Austin Hyde Date: Mon, 10 Aug 2015 17:42:26 -0400 Subject: [PATCH 309/386] Always end rc.conf entries with a new line --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 2ac7f2a4ad7..4969cadc7d6 100644 --- a/system/service.py +++ b/system/service.py @@ -359,7 +359,7 @@ class Service(object): self.changed = True # Add line to the list. - new_rc_conf.append(rcline) + new_rc_conf.append(rcline.strip() + '\n') # We are done with reading the current rc.conf, close it. 
RCFILE.close() From afb9b8e2f3817347b34c53e929be67158a3a1f08 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 10 Aug 2015 09:34:55 -0400 Subject: [PATCH 310/386] added gpg2 as fallback fixes #1796 --- packaging/os/rpm_key.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index d2d5e684015..b3cb6c99ed7 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -141,7 +141,14 @@ class RpmKey: return ret def getkeyid(self, keyfile): - gpg = self.module.get_bin_path('gpg', True) + + gpg = self.module.get_bin_path('gpg') + if not gpg: + gpg = self.module.get_bin_path('gpg2') + + if not gpg: + self.json_fail(msg="rpm_key requires a command lne gpg or gpg2, none found") + stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile]) for line in stdout.splitlines(): line = line.strip() From d1398251691849e6e6b0bb274a35bf22fd371f03 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Aug 2015 11:53:38 -0400 Subject: [PATCH 311/386] fixed typo --- packaging/os/rpm_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index b3cb6c99ed7..1d2d208e4be 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -147,7 +147,7 @@ class RpmKey: gpg = self.module.get_bin_path('gpg2') if not gpg: - self.json_fail(msg="rpm_key requires a command lne gpg or gpg2, none found") + self.json_fail(msg="rpm_key requires a command line gpg or gpg2, none found") stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile]) for line in stdout.splitlines(): From 55e8863cb448a19ff4ee7f1f65b78c3a44893cee Mon Sep 17 00:00:00 2001 From: Evgeny Vereshchagin Date: Wed, 12 Aug 2015 13:00:53 +0000 Subject: [PATCH 312/386] Fix service.enable on: Debian 8, Ubuntu 15.04 Workaround for 
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=751638 See https://github.com/systemd/systemd/issues/937 --- system/service.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index 2ac7f2a4ad7..e79a88a2dde 100644 --- a/system/service.py +++ b/system/service.py @@ -503,10 +503,21 @@ class LinuxService(Service): self.svc_initctl = location['initctl'] def get_systemd_service_enabled(self): - (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,)) + def sysv_exists(name): + script = '/etc/init.d/' + name + return os.access(script, os.X_OK) + + def sysv_is_enabled(name): + return bool(glob.glob('/etc/rc?.d/S??' + name)) + + service_name = self.__systemd_unit + (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,)) if rc == 0: return True - return False + elif sysv_exists(service_name): + return sysv_is_enabled(service_name) + else: + return False def get_systemd_status_dict(self): (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) From 63d7bbea19442fa98bd64b20a779d5923745891d Mon Sep 17 00:00:00 2001 From: "Michael J. Schultz" Date: Wed, 12 Aug 2015 12:13:29 -0500 Subject: [PATCH 313/386] Return an iterable instead of None By default `.get()` will return `None` on a key that doesn't exist. This causes a `TypeError` in the `for` loop a few lines down. This change simply returns an iterable type to avoid the error. 
--- cloud/amazon/ec2_metric_alarm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 578a1af7297..b9ac1524794 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -184,7 +184,7 @@ def create_metric_alarm(connection, module): comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'} alarm.comparison = comparisons[comparison] - dim1 = module.params.get('dimensions') + dim1 = module.params.get('dimensions', {}) dim2 = alarm.dimensions for keys in dim1: From 7a2687178e19fbeaf226c0776726fec032e328ed Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Aug 2015 18:10:24 -0400 Subject: [PATCH 314/386] avoid shlex exceptions in rc parsing (happens with multiline entries) fixes #1898 --- system/service.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index e79a88a2dde..6fb6197ba6e 100644 --- a/system/service.py +++ b/system/service.py @@ -980,7 +980,11 @@ class FreeBsdService(Service): rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)) cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments) - rcvars = shlex.split(stdout, comments=True) + try: + rcvars = shlex.split(stdout, comments=True) + except: + #TODO: add a warning to the output with the failure + continue if not rcvars: self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) From 6d7428527d05d561b634b23bbefb6cee88e606cf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Aug 2015 23:55:01 -0400 Subject: [PATCH 315/386] minor doc fixes --- system/cron.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/system/cron.py b/system/cron.py index 57455d6e19d..f2de1bcec0f 100644 --- a/system/cron.py +++ 
b/system/cron.py @@ -120,13 +120,16 @@ options: disabled: description: - If the job should be disabled (commented out) in the crontab. Only has effect if state=present - version_added: "1.9" + version_added: "2.0" required: false default: false requirements: - cron -author: "Dane Summers (@dsummersl)" -updates: [ 'Mike Grozak', 'Patrick Callahan', 'Evan Kaufman' ] +author: + - "Dane Summers (@dsummersl)" + - 'Mike Grozak' + - 'Patrick Callahan' + - 'Evan Kaufman (@EvanK)' """ EXAMPLES = ''' From 5d7cb981d3f5932a343c4be1d58ec266108b4b1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 13 Aug 2015 10:05:30 -0400 Subject: [PATCH 316/386] made continue into a pass (there was no loop) --- system/service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index edf5e8e6d34..8495bec9e24 100644 --- a/system/service.py +++ b/system/service.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: service -author: +author: - "Ansible Core Team" - "Michael DeHaan" version_added: "0.1" @@ -984,7 +984,7 @@ class FreeBsdService(Service): rcvars = shlex.split(stdout, comments=True) except: #TODO: add a warning to the output with the failure - continue + pass if not rcvars: self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) From 9bcf69a155836d265575a6b74eb310a0679816b6 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Thu, 13 Aug 2015 19:47:49 +0300 Subject: [PATCH 317/386] Fix obvious copy-paste error in module docs --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index f9f1b365c74..8f6d3d32f28 100644 --- a/files/copy.py +++ b/files/copy.py @@ -162,7 +162,7 @@ size: type: int sample: 1220 state: - description: permissions of the target, after execution + description: state of the target, after execution returned: success type: string sample: "file" From 8ff80c2c91819c0de39e2cb2f4aed9831ea54af1 Mon Sep 17 00:00:00 2001 
From: Brian Coca Date: Thu, 13 Aug 2015 14:56:15 -0400 Subject: [PATCH 318/386] updated docs for package --- packaging/os/package.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/package.py b/packaging/os/package.py index 288ca83a772..f4234b5a472 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -33,7 +33,8 @@ description: options: name: description: - - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages." + - "Package name, or package specifier with version, like C(name-1.0)." + - "Be aware that packages are not always named the same and this module will not 'translate' them per distro." required: true state: description: @@ -42,6 +43,7 @@ options: use: description: - The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it. + - You should only use this field if the automatic selection is not working for some reason. required: false default: auto requirements: From 6a8108133ddcb7ac0450c0f0894948eca30b363e Mon Sep 17 00:00:00 2001 From: "David M. Lee" Date: Fri, 14 Aug 2015 13:01:44 -0500 Subject: [PATCH 319/386] ec2_vol: Added missing "needs 2.0" doc The ability to find-or-create a volume was added in 2.0. Added note to the example. --- cloud/amazon/ec2_vol.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 4b829f7c26e..228bb12cfbc 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -146,6 +146,7 @@ EXAMPLES = ''' # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. 
# * Nothing will happen if the volume is already attached. +# * Requires Ansible 2.0 - ec2: keypair: "{{ keypair }}" From f8027a7f0ac4f3b036915ef8909d06f758e4c29a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Fri, 14 Aug 2015 20:46:59 +0000 Subject: [PATCH 320/386] Ensure Python 2.4 compatibility and Linux-restricted support - Make build_entry compatible with Python 2.4 - Re-add missing warning/comment that was forgotten while refactoring - Replace `all()` with a good ol' for-loop Python 2.4 compatibility - Make a condition check more explicit (when `state` is `query`) - Make sure this module can only be run with on a Linux distribution - Add a note about Linux-only support in the documentation - Set the version in which recursive support was added, 2.0 --- files/acl.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/files/acl.py b/files/acl.py index 808e854ad84..133bb379329 100644 --- a/files/acl.py +++ b/files/acl.py @@ -21,6 +21,8 @@ version_added: "1.4" short_description: Sets and retrieves file ACL information. description: - Sets and retrieves file ACL information. +notes: + - As of Ansible 2.0, this module only supports Linux distributions. options: name: required: true @@ -80,7 +82,7 @@ options: - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. recursive: - version_added: "@@@" + version_added: "2.0" required: false default: no choices: [ 'yes', 'no' ] @@ -150,7 +152,10 @@ def split_entry(entry): def build_entry(etype, entity, permissions=None): '''Builds and returns an entry string. 
Does not include the permissions bit if they are not provided.''' - return etype + ':' + entity + (':' + permissions if permissions else '') + if permissions: + return etype + ':' + entity + ':' + permissions + else: + return etype + ':' + entity def build_command(module, mode, path, follow, default, recursive, entry=''): @@ -163,6 +168,7 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): cmd.append('-x "%s"' % entry) else: # mode == 'get' cmd = [module.get_bin_path('getfacl', True)] + # prevents absolute path warnings and removes headers cmd.append('--omit-header') cmd.append('--absolute-names') @@ -187,7 +193,11 @@ def acl_changed(module, cmd): cmd = cmd[:] # lists are mutables so cmd would be overriden without this cmd.insert(1, '--test') lines = run_acl(module, cmd) - return not all(line.endswith('*,*') for line in lines) + + for line in lines: + if not line.endswith('*,*'): + return False + return True def run_acl(module, cmd, check_rc=True): @@ -206,6 +216,9 @@ def run_acl(module, cmd, check_rc=True): def main(): + if get_platform().lower() != 'linux': + module.fail_json(msg="The acl module is only available for Linux distributions.") + module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['path'], type='str'), @@ -295,7 +308,7 @@ def main(): run_acl(module, command, False) msg = "%s is absent" % entry - else: + elif state == 'query': msg = "current acl" acl = run_acl( From acfc2166b0234e12eb35c6486c66269219e6727c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Fri, 14 Aug 2015 23:40:47 +0000 Subject: [PATCH 321/386] Set version number in recursive option description --- files/acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/acl.py b/files/acl.py index 133bb379329..6499284f778 100644 --- a/files/acl.py +++ b/files/acl.py @@ -87,7 +87,7 @@ options: default: no choices: [ 'yes', 'no' ] description: - - Recursively sets the specified ACL (added in 
Ansible @@@). Incompatible with C(state=query). + - Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query). author: "Brian Coca (@bcoca)" notes: From d61fde42efce82741ca9809fe027bb8979313d2a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 14 Aug 2015 22:01:05 -0400 Subject: [PATCH 322/386] updated encoding comment and expanded authors field --- files/acl.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 6499284f778..8b93da1661f 100644 --- a/files/acl.py +++ b/files/acl.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -88,8 +89,9 @@ options: choices: [ 'yes', 'no' ] description: - Recursively sets the specified ACL (added in Ansible 2.0). Incompatible with C(state=query). - -author: "Brian Coca (@bcoca)" +author: + - "Brian Coca (@bcoca)" + - "Jérémie Astori (@astorije)" notes: - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. 
''' From d9511729208cdef60630a13d30bdf67008b40522 Mon Sep 17 00:00:00 2001 From: Ilya Epifanov Date: Sun, 16 Aug 2015 18:34:56 +0300 Subject: [PATCH 323/386] fixed memory_limit for docker api version >= 1.19 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1cf85843e0e..e045e2ce1fc 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1428,7 +1428,7 @@ class DockerManager(object): if api_version < 1.19: params['mem_limit'] = mem_limit else: - params['host_config']['mem_limit'] = mem_limit + params['host_config']['Memory'] = mem_limit def do_create(count, params): From 31ec203583919dd57633d1a4080ae17d4d90a5a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D0=BB=D0=B5=D0=BA=D1=81=D0=B0=D0=BD=D0=B4=D1=80=20?= =?UTF-8?q?=D0=9A=D0=BE=D1=81=D1=82=D1=8B=D1=80=D0=B5=D0=B2?= Date: Mon, 17 Aug 2015 18:00:04 +0300 Subject: [PATCH 324/386] Fix documentation of authorized_key.py man for sshd http://www.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man8/sshd.8?query=sshd says there's no "host" option but "from". 
--- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 5c12cfdde92..f9f773d8d90 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -109,7 +109,7 @@ EXAMPLES = ''' # Using key_options: - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - key_options='no-port-forwarding,host="10.0.1.1"' + key_options='no-port-forwarding,from="10.0.1.1"' # Set up authorized_keys exclusively with one key - authorized_key: user=root key=public_keys/doe-jane state=present From f459b3773cba0579d4a2ab5440e366c1cdd5b76a Mon Sep 17 00:00:00 2001 From: Mahesh Sawaiker Date: Mon, 17 Aug 2015 16:28:18 +0000 Subject: [PATCH 325/386] support creating role only --- cloud/openstack/keystone_user.py | 33 +++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index a3529c290b3..2596eab980c 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -252,8 +252,17 @@ def ensure_user_exists(keystone, user_name, password, email, tenant_name, email=email, tenant_id=tenant.id) return (True, user.id) +def ensure_role_exists(keystone, role_name): + # Get the role if it exists + try: + role = get_role(keystone, role_name) + except KeyError: + # Role doesn't exist yet + role = keystone.roles.create(role_name) + return (True, role.id) -def ensure_role_exists(keystone, user_name, tenant_name, role_name, + +def ensure_user_role_exists(keystone, user_name, tenant_name, role_name, check_mode): """ Check if role exists @@ -297,9 +306,11 @@ def ensure_user_absent(keystone, user, check_mode): raise NotImplementedError("Not yet implemented") -def ensure_role_absent(keystone, uesr, tenant, role, check_mode): +def ensure_user_role_absent(keystone, uesr, tenant, role, check_mode): raise NotImplementedError("Not yet implemented") 
+def ensure_role_absent(keystone, role_name): + raise NotImplementedError("Not yet implemented") def main(): @@ -378,14 +389,18 @@ def dispatch(keystone, user=None, password=None, tenant=None, X absent ensure_tenant_absent X X present ensure_user_exists X X absent ensure_user_absent - X X X present ensure_role_exists - X X X absent ensure_role_absent - - + X X X present ensure_user_role_exists + X X X absent ensure_user_role_absent + X present ensure_role_exists + X absent ensure_role_absent """ changed = False id = None - if tenant and not user and not role and state == "present": + if not tenant and not user and role and state == "present": + ensure_role_exists(keystone, role) + elif not tenant and not user and role and state == "absent": + ensure_role_absent(keystone, role) + elif tenant and not user and not role and state == "present": changed, id = ensure_tenant_exists(keystone, tenant, tenant_description, check_mode) elif tenant and not user and not role and state == "absent": @@ -396,10 +411,10 @@ def dispatch(keystone, user=None, password=None, tenant=None, elif tenant and user and not role and state == "absent": changed = ensure_user_absent(keystone, user, check_mode) elif tenant and user and role and state == "present": - changed, id = ensure_role_exists(keystone, user, tenant, role, + changed, id = ensure_user_role_exists(keystone, user, tenant, role, check_mode) elif tenant and user and role and state == "absent": - changed = ensure_role_absent(keystone, user, tenant, role, check_mode) + changed = ensure_user_role_absent(keystone, user, tenant, role, check_mode) else: # Should never reach here raise ValueError("Code should never reach here") From dfac073343b5cf293de0c96fa9777acfefe3af55 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Thu, 9 Jul 2015 23:55:56 +0200 Subject: [PATCH 326/386] Rebase start/stop instance pull code --- cloud/amazon/ec2.py | 98 ++++++++++++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 36 deletions(-) diff 
--git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 55c45a647f4..e8aaccaa10f 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -144,7 +144,7 @@ options: instance_tags: version_added: "1.0" description: - - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}' + - a hash/dictionary of tags to add to the new instance or for for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null aliases: [] @@ -229,19 +229,19 @@ options: exact_count: version_added: "1.5" description: - - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. + - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. required: false default: null aliases: [] count_tag: version_added: "1.5" description: - - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". + - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". 
required: false default: null aliases: [] -author: +author: - "Tim Gerla (@tgerla)" - "Lester Wade (@lwade)" - "Seth Vidal" @@ -271,7 +271,7 @@ EXAMPLES = ''' wait: yes wait_timeout: 500 count: 5 - instance_tags: + instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 @@ -305,7 +305,7 @@ EXAMPLES = ''' wait: yes wait_timeout: 500 count: 5 - instance_tags: + instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 @@ -366,7 +366,7 @@ EXAMPLES = ''' region: us-east-1 tasks: - name: Launch instance - ec2: + ec2: key_name: "{{ keypair }}" group: "{{ security_group }}" instance_type: "{{ instance_type }}" @@ -446,6 +446,15 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# +# Start stopped instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: running + # # Enforce that 5 instances with a tag "foo" are running # (Highly recommended!) @@ -474,11 +483,11 @@ EXAMPLES = ''' image: ami-40603AD1 wait: yes group: webserver - instance_tags: + instance_tags: Name: database dbtype: postgres exact_count: 5 - count_tag: + count_tag: Name: database dbtype: postgres vpc_subnet_id: subnet-29e63245 @@ -531,8 +540,8 @@ def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None): for res in reservations: if hasattr(res, 'instances'): for inst in res.instances: - instances.append(inst) - + instances.append(inst) + return reservations, instances @@ -543,7 +552,7 @@ def _set_none_to_blank(dictionary): result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" - return result + return result def get_reservations(module, ec2, tags=None, state=None, zone=None): @@ -682,7 +691,7 @@ def create_block_device(module, ec2, volume): # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ MAX_IOPS_TO_SIZE_RATIO = 30 if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not 
in volume: + if 'volume_size' not in volume: module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') if 'snapshot' in volume: if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: @@ -695,7 +704,7 @@ def create_block_device(module, ec2, volume): if 'encrypted' in volume: module.fail_json(msg = 'You can not set encyrption when creating a volume from a snapshot') if 'ephemeral' in volume: - if 'snapshot' in volume: + if 'snapshot' in volume: module.fail_json(msg = 'Cannot set both ephemeral and snapshot') return BlockDeviceType(snapshot_id=volume.get('snapshot'), ephemeral_name=volume.get('ephemeral'), @@ -760,18 +769,18 @@ def enforce_count(module, ec2, vpc): for inst in instance_dict_array: inst['state'] = "terminated" terminated_list.append(inst) - instance_dict_array = terminated_list - - # ensure all instances are dictionaries + instance_dict_array = terminated_list + + # ensure all instances are dictionaries all_instances = [] for inst in instances: if type(inst) is not dict: inst = get_instance_info(inst) - all_instances.append(inst) + all_instances.append(inst) return (all_instances, instance_dict_array, changed_instance_ids, changed) - - + + def create_instances(module, ec2, vpc, override_count=None): """ Creates new instances @@ -879,7 +888,7 @@ def create_instances(module, ec2, vpc, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest if not spot_price: params['tenancy'] = tenancy @@ -912,7 +921,7 @@ def create_instances(module, ec2, vpc, override_count=None): groups=group_id, associate_public_ip_address=assign_public_ip) interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - params['network_interfaces'] = interfaces + params['network_interfaces'] = interfaces else: params['subnet_id'] = vpc_subnet_id if vpc_subnet_id: 
@@ -922,11 +931,11 @@ def create_instances(module, ec2, vpc, override_count=None): if volumes: bdm = BlockDeviceMapping() - for volume in volumes: + for volume in volumes: if 'device_name' not in volume: module.fail_json(msg = 'Device name must be set for volume') # Minimum volume size is 1GB. We'll use volume size explicitly set to 0 - # to be a signal not to create this volume + # to be a signal not to create this volume if 'volume_size' not in volume or int(volume['volume_size']) > 0: bdm[volume['device_name']] = create_block_device(module, ec2, volume) @@ -1016,7 +1025,7 @@ def create_instances(module, ec2, vpc, override_count=None): num_running = 0 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and num_running < len(instids): - try: + try: res_list = ec2.get_all_instances(instids) except boto.exception.BotoServerError, e: if e.error_code == 'InvalidInstanceID.NotFound': @@ -1029,7 +1038,7 @@ def create_instances(module, ec2, vpc, override_count=None): for res in res_list: num_running += len([ i for i in res.instances if i.state=='running' ]) if len(res_list) <= 0: - # got a bad response of some sort, possibly due to + # got a bad response of some sort, possibly due to # stale/cached data. Wait a second and then try again time.sleep(1) continue @@ -1141,12 +1150,12 @@ def terminate_instances(module, ec2, instance_ids): filters={'instance-state-name':'terminated'}): for inst in res.instances: instance_dict_array.append(get_instance_info(inst)) - + return (changed, instance_dict_array, terminated_instance_ids) -def startstop_instances(module, ec2, instance_ids, state): +def startstop_instances(module, ec2, instance_ids, state, instance_tags): """ Starts or stops a list of existing instances @@ -1154,6 +1163,8 @@ def startstop_instances(module, ec2, instance_ids, state): ec2: authenticated ec2 connection object instance_ids: The list of instances to start in the form of [ {id: }, ..] 
+ instance_tags: A dict of tag keys and values in the form of + {key: value, ... } state: Intended state ("running" or "stopped") Returns a dictionary of instance information @@ -1162,19 +1173,33 @@ def startstop_instances(module, ec2, instance_ids, state): If the instance was not able to change state, "changed" will be set to False. + Note that if instance_ids and instance_tags are both non-empty, + this method will process the intersection of the two """ - + wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) changed = False instance_dict_array = [] - + if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') + # Fail unless the user defined instance tags + if not instance_tags: + module.fail_json(msg='instance_ids should be a list of instances, aborting') + + # To make an EC2 tag filter, we need to prepend 'tag:' to each key. + # An empty filter does no filtering, so it's safe to pass it to the + # get_all_instances method even if the user did not specify instance_tags + filters = {} + if instance_tags: + for key, value in instance_tags.items(): + filters["tag:" + key] = value + + # Check that our instances are not in the state we want to take # Check (and eventually change) instances attributes and instances state running_instances_array = [] - for res in ec2.get_all_instances(instance_ids): + for res in ec2.get_all_instances(instance_ids, filters=filters): for inst in res.instances: # Check "source_dest_check" attribute @@ -1295,11 +1320,12 @@ def main(): (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) elif state in ('running', 'stopped'): - instance_ids = module.params['instance_ids'] - if not instance_ids: - module.fail_json(msg='instance_ids list is requried for %s state' % state) + instance_ids = module.params.get('instance_ids') + instance_tags = module.params.get('instance_tags') + if 
not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): + module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) - (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state) + (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags) elif state == 'present': # Changed is always set to true when provisioning new instances From 5913f5e5e125a871f68ae8564a8e14a634b89635 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 18 Aug 2015 14:59:35 -0400 Subject: [PATCH 327/386] deal with more failures when apt module fails to instantiate pkg fixes #1499 --- packaging/os/apt.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 19a7c426f5e..92b0f2fb8fd 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -403,19 +403,20 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): for deb_file in debs.split(','): try: pkg = apt.debfile.DebPackage(deb_file) - except SystemError, e: - m.fail_json(msg="System Error: %s" % str(e)) - # Check if it's already installed - if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: - continue - # Check if package is installable - if not pkg.check() and not force: - m.fail_json(msg=pkg._failure_string) + # Check if it's already installed + if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: + continue + # Check if package is installable + if not pkg.check() and not force: + m.fail_json(msg=pkg._failure_string) - # add any missing deps to the list of deps we need - # to install so they're all done in one shot - deps_to_install.extend(pkg.missing_deps) + # add any missing deps to the list of deps we need + # to install so they're all done in one shot + deps_to_install.extend(pkg.missing_deps) + + except Exception, e: + m.fail_json(msg="Unable to 
install package: %s" % str(e)) # and add this deb to the list of packages to install pkgs_to_install.append(deb_file) From 50c6425673ebe70a44e9c48165828a058cabcd72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Wed, 19 Aug 2015 00:24:08 +0000 Subject: [PATCH 328/386] Fix minor whitespace issues --- files/acl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 8b93da1661f..14851225348 100644 --- a/files/acl.py +++ b/files/acl.py @@ -21,7 +21,7 @@ module: acl version_added: "1.4" short_description: Sets and retrieves file ACL information. description: - - Sets and retrieves file ACL information. + - Sets and retrieves file ACL information. notes: - As of Ansible 2.0, this module only supports Linux distributions. options: @@ -122,6 +122,7 @@ acl: sample: [ "user::rwx", "group::rwx", "other::rwx" ] ''' + def split_entry(entry): ''' splits entry and ensures normalized return''' @@ -161,7 +162,7 @@ def build_entry(etype, entity, permissions=None): def build_command(module, mode, path, follow, default, recursive, entry=''): - '''Builds and returns agetfacl/setfacl command.''' + '''Builds and returns a getfacl/setfacl command.''' if mode == 'set': cmd = [module.get_bin_path('setfacl', True)] cmd.append('-m "%s"' % entry) From 421d3f12cf37f601943da12ea44ec941bb4ef9f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Wed, 19 Aug 2015 00:25:18 +0000 Subject: [PATCH 329/386] Fix wrong processing of lines returned by setfacl in test mode --- files/acl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 14851225348..64cc2281d3d 100644 --- a/files/acl.py +++ b/files/acl.py @@ -199,8 +199,8 @@ def acl_changed(module, cmd): for line in lines: if not line.endswith('*,*'): - return False - return True + return True + return False def run_acl(module, cmd, check_rc=True): From 3ac990556d5ccf9c9d5e8e3c5e1ff41cbbb726f6 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Wed, 19 Aug 2015 00:26:04 +0000 Subject: [PATCH 330/386] Fix wrong expectation regarding entry format in acl module --- files/acl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 64cc2281d3d..7d1b96b9e97 100644 --- a/files/acl.py +++ b/files/acl.py @@ -276,10 +276,10 @@ def main(): if etype or entity or permissions: module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.") - if state == 'present' and entry.count(":") != 3: + if state == 'present' and entry.count(":") != 2: module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.") - if state == 'absent' and entry.count(":") != 2: + if state == 'absent' and entry.count(":") != 1: module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") default, etype, entity, permissions = split_entry(entry) From dddb5270c8545c19c6619575bf99166ba6819e20 Mon Sep 17 00:00:00 2001 From: Sam Yaple Date: Wed, 19 Aug 2015 04:22:31 +0000 Subject: [PATCH 331/386] Refix bug 1226 after revert This patch properly fixes bug 1226 without introducing a breaking change to idempotency which was introduced in PR #1358 We can properly assign permissions to databases with a '.' 
in the name of the database as well as assign privileges to all databases as specified with '*' --- database/mysql/mysql_user.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 4f0dee5374c..5ea825f7514 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -320,6 +320,11 @@ def privileges_unpack(priv): privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') + dbpriv = pieces[0].rsplit(".", 1) + # Do not escape if privilege is for database '*' (all databases) + if dbpriv[0].strip('`') != '*': + pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) + if '.' in pieces[0]: pieces[0] = pieces[0].split('.') for idx, piece in enumerate(pieces): From 54804d25e31263a332c66e62d3904cfbd43c958d Mon Sep 17 00:00:00 2001 From: Sam Yaple Date: Wed, 19 Aug 2015 08:52:05 +0000 Subject: [PATCH 332/386] Actually remove the section we are replacing --- database/mysql/mysql_user.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 5ea825f7514..1ea54b41b3a 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -325,13 +325,6 @@ def privileges_unpack(priv): if dbpriv[0].strip('`') != '*': pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) - if '.' in pieces[0]: - pieces[0] = pieces[0].split('.') - for idx, piece in enumerate(pieces): - if pieces[0][idx] != "*": - pieces[0][idx] = "`" + pieces[0][idx] + "`" - pieces[0] = '.'.join(pieces[0]) - if '(' in pieces[1]: output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) for i in output[pieces[0]]: From 8215aad3b3345be74d1a4f9dc90bc8019d48fb81 Mon Sep 17 00:00:00 2001 From: Jason Cowley Date: Wed, 19 Aug 2015 11:30:21 -0700 Subject: [PATCH 333/386] Add support for S3 canned permissions. 
resolves #1939 --- cloud/amazon/s3.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 5c97031c09c..811978a0f0e 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -93,6 +93,12 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + permission: + description: + - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. + required: false + default: private + version_added: "2.0" prefix: description: - Limits the response to keys that begin with the specified prefix for list mode @@ -167,7 +173,7 @@ EXAMPLES = ''' - s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 # Create an empty bucket -- s3: bucket=mybucket mode=create +- s3: bucket=mybucket mode=create permission=public-read # Create a bucket with key as directory, in the EU region - s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1 @@ -236,6 +242,7 @@ def create_bucket(module, s3, bucket, location=None): location = Location.DEFAULT try: bucket = s3.create_bucket(bucket, location=location) + bucket.set_acl(module.params.get('permission')) except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) if bucket: @@ -306,6 +313,7 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, heade key.set_metadata(meta_key, metadata[meta_key]) key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers) + key.set_acl(module.params.get('permission')) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: @@ -378,6 +386,7 @@ def main(): metadata = dict(type='dict'), mode = dict(choices=['get', 'put', 'delete', 
'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), + permission = dict(choices=['private', 'public-read', 'public-read-write', 'authenticated-read'], default='private'), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), prefix = dict(default=None), From 4171d167f481928b6d14c66749fe9be5a0595543 Mon Sep 17 00:00:00 2001 From: Nithy Renganathan Date: Thu, 20 Aug 2015 14:24:05 +0000 Subject: [PATCH 334/386] Handle the changed value --- cloud/openstack/keystone_user.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index 2596eab980c..babcc3cc569 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -256,11 +256,14 @@ def ensure_role_exists(keystone, role_name): # Get the role if it exists try: role = get_role(keystone, role_name) + # Role does exist, we're done + return (False, role.id) except KeyError: # Role doesn't exist yet - role = keystone.roles.create(role_name) - return (True, role.id) + pass + role = keystone.roles.create(role_name) + return (True, role.id) def ensure_user_role_exists(keystone, user_name, tenant_name, role_name, check_mode): @@ -397,9 +400,9 @@ def dispatch(keystone, user=None, password=None, tenant=None, changed = False id = None if not tenant and not user and role and state == "present": - ensure_role_exists(keystone, role) + changed, id = ensure_role_exists(keystone, role) elif not tenant and not user and role and state == "absent": - ensure_role_absent(keystone, role) + changed = ensure_role_absent(keystone, role) elif tenant and not user and not role and state == "present": changed, id = ensure_tenant_exists(keystone, tenant, tenant_description, check_mode) From e4a5f3b4099765a103ab949741312e520e8d35d6 Mon Sep 17 00:00:00 2001 From: Brian Richards Date: Thu, 20 Aug 2015 11:07:58 -0500 Subject: [PATCH 335/386] Preventing servers that are deleted 
and left in the deleted state from being included in the server list --- cloud/rackspace/rax_facts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py index c30df5b9462..481732c0af7 100644 --- a/cloud/rackspace/rax_facts.py +++ b/cloud/rackspace/rax_facts.py @@ -97,7 +97,9 @@ def rax_facts(module, address, name, server_id): servers.append(cs.servers.get(server_id)) except Exception, e: pass - + + servers[:] = [server for server in servers if server.status != "DELETED"] + if len(servers) > 1: module.fail_json(msg='Multiple servers found matching provided ' 'search parameters') From 9d4694122deeb3a5f5f193dcf32851f36decd73c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 20 Aug 2015 13:02:29 -0700 Subject: [PATCH 336/386] Return change results in a dictionary listing the package names. Fix a parsing problem when package names contain a dot. --- packaging/os/yum.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index cf321b31d13..5778bc38c3c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -609,10 +609,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) - for p in pkgs: - # take note of which packages are getting installed - res['results'].append('%s will be installed' % p) - module.exit_json(changed=True, results=res['results']) + module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs)) changed = True @@ -680,10 +677,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: - # take note of which packages are getting removed - for p in pkgs: - res['results'].append('%s will be removed' % p) - module.exit_json(changed=True, 
results=res['results']) + module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs)) rc, out, err = module.run_command(cmd) @@ -745,7 +739,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): continue else: pkg, version, repo = line - name, dist = pkg.split('.') + name, dist = pkg.rsplit('.', 1) updates.update({name: {'version': version, 'dist': dist, 'repo': repo}}) elif rc == 1: res['msg'] = err @@ -800,15 +794,15 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # check_mode output if module.check_mode: + to_update = [] for w in will_update: if w.startswith('@'): + to_update.append((w, None)) msg = '%s will be updated' % w else: - msg = '%s will be updated with %s-%s.%s from %s' % (w, w, updates[w]['version'], updates[w]['dist'], updates[w]['repo']) - res['results'].append(msg) + to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))) - for p in pkgs['install']: - res['results'].append('%s will be installed' % p) + res['changes'] = dict(installed=pkgs['install'], updated=to_update) if len(will_update) > 0 or len(pkgs['install']) > 0: res['changed'] = True From e95bcaeb8a98ec280acccadbb5491efc0c1679af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Thu, 20 Aug 2015 22:22:28 +0000 Subject: [PATCH 337/386] Remove support for `d[efault]:` in entry permissions It is not documented in [the Ansible doc page][1] nor [the BSD setfacl man entry][2] (which means it might not be compatible with BSD) so removing it does not break the API. On the other hand, it does not conform with POSIX 1003.1e DRAFT STANDARD 17 according to the [Linux setfacl man entry][3] so safer to remove. Finally, the most important reason: in non POSIX 1003.e mode, only ACL entries without the permissions field are accepted, so having an optional field here is very much error-prone. 
[1]: http://docs.ansible.com/ansible/acl_module.html [2]: http://www.freebsd.org/cgi/man.cgi?format=html&query=setfacl(1) [3]: http://linuxcommand.org/man_pages/setfacl1.html --- files/acl.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/files/acl.py b/files/acl.py index 7d1b96b9e97..2bd27f621f3 100644 --- a/files/acl.py +++ b/files/acl.py @@ -128,17 +128,12 @@ def split_entry(entry): a = entry.split(':') a.reverse() - if len(a) == 3: - a.append(False) try: - p, e, t, d = a + p, e, t = a except ValueError, e: print "wtf?? %s => %s" % (entry, a) raise e - if d: - d = True - if t.startswith("u"): t = "user" elif t.startswith("g"): @@ -150,7 +145,7 @@ def split_entry(entry): else: t = None - return [d, t, e, p] + return [t, e, p] def build_entry(etype, entity, permissions=None): @@ -282,7 +277,7 @@ def main(): if state == 'absent' and entry.count(":") != 1: module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") - default, etype, entity, permissions = split_entry(entry) + etype, entity, permissions = split_entry(entry) changed = False msg = "" From 4721d6d8b5e251054b01ddaf8bb852e8204c2c9f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 20 Aug 2015 15:30:32 -0700 Subject: [PATCH 338/386] Fix for the case where plugins aren't loaded on old RHEL systems --- packaging/os/yum.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 423f59981ae..c66e73ad98b 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -972,10 +972,15 @@ def main(): # loaded and plugins are discovered my.conf repoquery = None - if 'rhnplugin' in my.plugins._plugins: - repoquerybin = ensure_yum_utils(module) - if repoquerybin: - repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] + try: + yum_plugins = my.plugins._plugins + except AttributeError: + pass + else: + if 'rhnplugin' in yum_plugins: + repoquerybin = 
ensure_yum_utils(module) + if repoquerybin: + repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] pkg = [ p.strip() for p in params['name']] exclude = params['exclude'] From 0e659ad8723789310446221947256e33780e77d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Thu, 20 Aug 2015 22:40:15 +0000 Subject: [PATCH 339/386] Make sure permission-less entries are accepted when state=absent Also, remove that try condition as, at that stage, no permissions with other than 2 or 3 fields are sent to the function. --- files/acl.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/files/acl.py b/files/acl.py index 2bd27f621f3..58550c19124 100644 --- a/files/acl.py +++ b/files/acl.py @@ -127,12 +127,10 @@ def split_entry(entry): ''' splits entry and ensures normalized return''' a = entry.split(':') - a.reverse() - try: - p, e, t = a - except ValueError, e: - print "wtf?? %s => %s" % (entry, a) - raise e + if len(a) == 2: + a.append(None) + + t, e, p = a if t.startswith("u"): t = "user" From 8eefd44aefb2b17679fe1d292eb48af52cce094a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Thu, 20 Aug 2015 22:43:15 +0000 Subject: [PATCH 340/386] Make sure entry is not sent when acl state=query --- files/acl.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/acl.py b/files/acl.py index 58550c19124..06fd304e361 100644 --- a/files/acl.py +++ b/files/acl.py @@ -275,6 +275,9 @@ def main(): if state == 'absent' and entry.count(":") != 1: module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") + if state == 'query': + module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.") + etype, entity, permissions = split_entry(entry) changed = False From 72fb7a0a172e648bbeb4597e109df0238ac7cec9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Thu, 20 Aug 2015 23:35:53 +0000 Subject: [PATCH 341/386] Fix physical walk on acl 
module for Linux `-h` is for BSD [1] while `-P`/`--physical` is for Linux [2]. This commit fixes that option now that acl module is (temporarily) only supported for Linux. I will re-add `-h` when fixing BSD support. [1]: http://www.freebsd.org/cgi/man.cgi?format=html&query=setfacl(1) [2]: http://linuxcommand.org/man_pages/setfacl1.html --- files/acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/acl.py b/files/acl.py index 06fd304e361..ad0f4607609 100644 --- a/files/acl.py +++ b/files/acl.py @@ -172,7 +172,7 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): cmd.append('--recursive') if not follow: - cmd.append('-h') + cmd.append('--physical') if default: if(mode == 'rm'): From 428550e179f7a57202b452ba3e530b3c791f695e Mon Sep 17 00:00:00 2001 From: Simon Li Date: Fri, 21 Aug 2015 17:55:28 +0100 Subject: [PATCH 342/386] Don't fail in check_mode if user exists PR #1651 fixed issue #1515 but the requirement for path to be defined is unnecessarily strict. If the user has previously been created a path isn't necessary. 
--- system/authorized_key.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index f9f773d8d90..376cf4c61dc 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -169,16 +169,15 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): :return: full path string to authorized_keys for user """ - if module.check_mode: - if path is None: - module.fail_json(msg="You must provide full path to key file in check mode") - else: - keysfile = path - return keysfile + if module.check_mode and path is not None: + keysfile = path + return keysfile try: user_entry = pwd.getpwnam(user) except KeyError, e: + if module.check_mode and path is None: + module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) if path is None: homedir = user_entry.pw_dir From f7f621839ad2063c707506f7eaf5663f113664a7 Mon Sep 17 00:00:00 2001 From: Bruno Galindro da Costa Date: Fri, 21 Aug 2015 14:13:09 -0300 Subject: [PATCH 343/386] Added termination_policies option --- cloud/amazon/ec2_asg.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index efcd66606b8..e67d2a07d39 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -126,6 +126,13 @@ options: version_added: "1.9" default: yes required: False + termination_policies: + description: + - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. + required: false + default: Default. E.g.: When used to create a new autoscaling group, the “Default” value is used. 
When used to change an existent autoscaling group, the current termination policies are mantained + choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] + version_added: "2.0" extends_documentation_fragment: aws """ @@ -421,7 +428,8 @@ def create_autoscaling_group(connection, module): tags=asg_tags, health_check_period=health_check_period, health_check_type=health_check_type, - default_cooldown=default_cooldown) + default_cooldown=default_cooldown, + termination_policies=termination_policies) try: connection.create_auto_scaling_group(ag) @@ -783,7 +791,8 @@ def main(): health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), - wait_for_instances=dict(type='bool', default=True) + wait_for_instances=dict(type='bool', default=True), + termination_policies=dict(type='list', default=None) ), ) From 2a5f3754e78664d96529f18b9f7bebce6722c629 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Fri, 21 Aug 2015 13:37:29 -0700 Subject: [PATCH 344/386] added windows facts ansible_lastboot, ansible_uptime_seconds switched OS object to Get-CimInstance since we need a DateTime object for lastbootuptime --- windows/setup.ps1 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 32b4d865263..bd2f6ac8c76 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -25,7 +25,7 @@ $result = New-Object psobject @{ changed = $false }; -$win32_os = Get-WmiObject Win32_OperatingSystem +$win32_os = Get-CimInstance Win32_OperatingSystem $osversion = [Environment]::OSVersion $memory = @() $memory += Get-WmiObject win32_Physicalmemory @@ -66,6 +66,9 @@ Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version Set-Attr $result.ansible_facts "ansible_totalmem" $capacity +Set-Attr $result.ansible_facts "ansible_lastboot" 
$win32_os.lastbootuptime.ToString("u") +Set-Attr $result.ansible_facts "ansible_uptime_seconds" $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds)) + $ips = @() Foreach ($ip in $netcfg.IPAddress) { If ($ip) { $ips += $ip } } Set-Attr $result.ansible_facts "ansible_ip_addresses" $ips From 85ddb1b90232dbd68798e9b2d7dafa5689a1d30e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 20 Aug 2015 17:32:05 -0400 Subject: [PATCH 345/386] Fixing region requirement regarding euca clusters Fixes ansible/ansible#11023 --- cloud/amazon/ec2.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 55c45a647f4..c2b57eb7cd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -824,7 +824,10 @@ def create_instances(module, ec2, vpc, override_count=None): vpc_id = None if vpc_subnet_id: - vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + if not vpc: + module.fail_json(msg="region must be specified") + else: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id else: vpc_id = None @@ -1281,7 +1284,7 @@ def main(): except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) else: - module.fail_json(msg="region must be specified") + vpc = None tagged_instances = [] From 1d074d43aa8584f8de01edaee1ffe456a5ab4844 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Sat, 22 Aug 2015 19:01:11 -0400 Subject: [PATCH 346/386] * Update core modules to fix strict mode errors. 
* Also fix creates parameter issue in win_msi as described in https://github.com/ansible/ansible-modules-core/issues/129, slightly different fix from https://github.com/ansible/ansible-modules-core/pull/1482 * Fix setup.ps1 module issue described in https://github.com/ansible/ansible-modules-core/issues/1927 --- windows/setup.ps1 | 2 +- windows/win_feature.ps1 | 50 ++++++++------------------------------ windows/win_group.ps1 | 28 ++++++++++------------ windows/win_msi.ps1 | 32 ++++++++++--------------- windows/win_service.ps1 | 17 +++++++------ windows/win_stat.ps1 | 8 +++---- windows/win_user.ps1 | 53 ++++++++++++++--------------------------- 7 files changed, 65 insertions(+), 125 deletions(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index bd2f6ac8c76..3e3317d0450 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -60,7 +60,7 @@ Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() Set-Attr $result.ansible_facts "ansible_os_family" "Windows" -Set-Attr $result.ansible_facts "ansible_os_name" $win32_os.Name.Split('|')[0] +Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).Trim() Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index 458d942e328..ec6317fb89b 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -27,48 +27,18 @@ $result = New-Object PSObject -Property @{ changed = $false } -If ($params.name) { - $name = $params.name -split ',' | % { $_.Trim() } -} -Else { - Fail-Json $result "mising required argument: name" +$name = Get-Attr $params "name" -failifempty $true +$name = $name -split ',' | % { $_.Trim() } + +$state = 
Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" } -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ($params.restart) { - $restart = $params.restart | ConvertTo-Bool -} -Else -{ - $restart = $false -} - -if ($params.include_sub_features) -{ - $includesubfeatures = $params.include_sub_features | ConvertTo-Bool -} -Else -{ - $includesubfeatures = $false -} - -if ($params.include_management_tools) -{ - $includemanagementtools = $params.include_management_tools | ConvertTo-Bool -} -Else -{ - $includemanagementtools = $false -} +$restart = Get-Attr $params "restart" $false | ConvertTo-Bool +$includesubfeatures = Get-Attr $params "include_sub_features" $false | ConvertTo-Bool +$includemanagementtools = Get-Attr $params "include_management_tools" $false | ConvertTo-Bool If ($state -eq "present") { try { diff --git a/windows/win_group.ps1 b/windows/win_group.ps1 index febaf47d014..c3fc920c916 100644 --- a/windows/win_group.ps1 +++ b/windows/win_group.ps1 @@ -24,35 +24,31 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" +$name = Get-Attr $params "name" -failifempty $true + +$state = Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne "present") -and ($state -ne "absent")) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" } -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } 
-} -Elseif (-not $params.state) { - $state = "present" -} +$description = Get-Attr $params "description" $null $adsi = [ADSI]"WinNT://$env:COMPUTERNAME" -$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $params.name } +$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $name } try { If ($state -eq "present") { If (-not $group) { - $group = $adsi.Create("Group", $params.name) + $group = $adsi.Create("Group", $name) $group.SetInfo() Set-Attr $result "changed" $true } - If ($params.description.GetType) { - IF (-not $group.description -or $group.description -ne $params.description) { - $group.description = $params.description + If ($null -ne $description) { + IF (-not $group.description -or $group.description -ne $description) { + $group.description = $description $group.SetInfo() Set-Attr $result "changed" $true } diff --git a/windows/win_msi.ps1 b/windows/win_msi.ps1 index 1c2bc8a3019..f1381e9bf23 100644 --- a/windows/win_msi.ps1 +++ b/windows/win_msi.ps1 @@ -21,36 +21,28 @@ $params = Parse-Args $args; -$result = New-Object psobject; -Set-Attr $result "changed" $false; +$path = Get-Attr $params "path" -failifempty $true +$state = Get-Attr $params "state" "present" +$creates = Get-Attr $params "creates" $false +$extra_args = Get-Attr $params "extra_args" "" -If (-not $params.path.GetType) -{ - Fail-Json $result "missing required arguments: path" -} +$result = New-Object psobject @{ + changed = $false +}; -$extra_args = "" -If ($params.extra_args.GetType) +If (($creates -ne $false) -and ($state -ne "absent") -and (Test-Path $creates)) { - $extra_args = $params.extra_args; -} - -If ($params.creates.GetType -and $params.state.GetType -and $params.state -ne "absent") -{ - If (Test-File $creates) - { - Exit-Json $result; - } + Exit-Json $result; } $logfile = [IO.Path]::GetTempFileName(); -if ($params.state.GetType -and $params.state -eq "absent") +if ($state -eq "absent") { - msiexec.exe /x 
$params.path /qb /l $logfile $extra_args; + msiexec.exe /x $path /qn /l $logfile $extra_args } Else { - msiexec.exe /i $params.path /qb /l $logfile $extra_args; + msiexec.exe /i $path /qn /l $logfile $extra_args } Set-Attr $result "changed" $true; diff --git a/windows/win_service.ps1 b/windows/win_service.ps1 index a70d82a4ef3..4ea4e2697a1 100644 --- a/windows/win_service.ps1 +++ b/windows/win_service.ps1 @@ -24,26 +24,25 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} +$name = Get-Attr $params "name" -failifempty $true +$state = Get-Attr $params "state" $false +$startMode = Get-Attr $params "start_mode" $false -If ($params.state) { - $state = $params.state.ToString().ToLower() +If ($state) { + $state = $state.ToString().ToLower() If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) { Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'" } } -If ($params.start_mode) { - $startMode = $params.start_mode.ToString().ToLower() +If ($startMode) { + $startMode = $startMode.ToString().ToLower() If (($startMode -ne 'auto') -and ($startMode -ne 'manual') -and ($startMode -ne 'disabled')) { Fail-Json $result "start mode is '$startMode'; must be 'auto', 'manual', or 'disabled'" } } -$svcName = $params.name +$svcName = $name $svc = Get-Service -Name $svcName -ErrorAction SilentlyContinue If (-not $svc) { Fail-Json $result "Service '$svcName' not installed" diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index cf8c14a4d49..af9cbd7eca5 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -42,14 +42,14 @@ If (Test-Path $path) Set-Attr $result.stat "exists" $TRUE; $info = Get-Item $path; $epoch_date = Get-Date -Date "01/01/1970" - If ($info.Directory) # Only files have the .Directory attribute. 
+ If ($info.PSIsContainer) { - Set-Attr $result.stat "isdir" $FALSE; - Set-Attr $result.stat "size" $info.Length; + Set-Attr $result.stat "isdir" $TRUE; } Else { - Set-Attr $result.stat "isdir" $TRUE; + Set-Attr $result.stat "isdir" $FALSE; + Set-Attr $result.stat "size" $info.Length; } Set-Attr $result.stat "extension" $info.Extension; Set-Attr $result.stat "attributes" $info.Attributes.ToString(); diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index b7be7e4eea3..ac40ced2cbc 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -16,7 +16,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# WANT_JSON # POWERSHELL_COMMON ######## @@ -55,33 +54,21 @@ $result = New-Object psobject @{ changed = $false }; -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" -} - -$username = Get-Attr $params "name" +$username = Get-Attr $params "name" -failifempty $true $fullname = Get-Attr $params "fullname" $description = Get-Attr $params "description" $password = Get-Attr $params "password" -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { - Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" - } -} -ElseIf (!$params.state) { - $state = "present" +$state = Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { + Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" } -If ($params.update_password) { - $update_password = $params.update_password.ToString().ToLower() - If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { - Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" - } -} -ElseIf (!$params.update_password) { - $update_password = "always" 
+$update_password = Get-Attr $params "update_password" "always" +$update_password = $update_password.ToString().ToLower() +If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { + Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" } $password_expired = Get-Attr $params "password_expired" $null @@ -126,14 +113,10 @@ If ($groups -ne $null) { } } -If ($params.groups_action) { - $groups_action = $params.groups_action.ToString().ToLower() - If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { - Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" - } -} -ElseIf (!$params.groups_action) { - $groups_action = "replace" +$groups_action = Get-Attr $params "groups_action" "replace" +$groups_action = $groups_action.ToString().ToLower() +If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { + Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" } $user_obj = Get-User $username @@ -141,7 +124,7 @@ $user_obj = Get-User $username If ($state -eq 'present') { # Add or update user try { - If (!$user_obj.GetType) { + If (-not $user_obj -or -not $user_obj.GetType) { $user_obj = $adsi.Create("User", $username) If ($password -ne $null) { $user_obj.SetPassword($password) @@ -200,13 +183,13 @@ If ($state -eq 'present') { If ($result.changed) { $user_obj.SetInfo() } - If ($groups.GetType) { + If ($null -ne $groups) { [string[]]$current_groups = $user_obj.Groups() | ForEach { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { ForEach ($grp in $current_groups) { If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 
'Group' -and $_.Name -eq $grp } - If ($group_obj.GetType) { + If ($group_obj -and $group_obj.GetType) { $group_obj.Remove($user_obj.Path) $result.changed = $true } @@ -239,7 +222,7 @@ If ($state -eq 'present') { ElseIf ($state -eq 'absent') { # Remove user try { - If ($user_obj.GetType) { + If ($user_obj -and $user_obj.GetType) { $username = $user_obj.Name.Value $adsi.delete("User", $user_obj.Name.Value) $result.changed = $true @@ -252,7 +235,7 @@ ElseIf ($state -eq 'absent') { } try { - If ($user_obj.GetType) { + If ($user_obj -and $user_obj.GetType) { $user_obj.RefreshCache() Set-Attr $result "name" $user_obj.Name[0] Set-Attr $result "fullname" $user_obj.FullName[0] From daf7a0551beefab7047fb36a290c03bd828ec0d3 Mon Sep 17 00:00:00 2001 From: Omri Iluz Date: Sun, 23 Aug 2015 02:29:39 -0700 Subject: [PATCH 347/386] No need for .keys on volumes list Since https://github.com/ansible/ansible-modules-core/commit/c3f92cca210db1f7042bfce1ff90645255f0b49e changed "volumes" to be a list instead of a dictionary, we don't need (and cannot) .keys when appending to set. 
Reported as bug #1957 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e045e2ce1fc..82c39006678 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1020,7 +1020,7 @@ class DockerManager(object): expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) if self.volumes: - expected_volume_keys.update(self.volumes.keys()) + expected_volume_keys.update(self.volumes) actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) From 39e7e05a8dc04613bcf8f5d213ea1fe90452dc32 Mon Sep 17 00:00:00 2001 From: Till Backhaus Date: Mon, 24 Aug 2015 20:06:53 +0200 Subject: [PATCH 348/386] Delete dead and broken code --- cloud/amazon/s3.py | 9 --------- cloud/google/gc_storage.py | 9 --------- 2 files changed, 18 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 5c97031c09c..e98308bb874 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -281,15 +281,6 @@ def create_dirkey(module, s3, bucket, obj): except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - def path_check(path): if os.path.exists(path): return True diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 291d4ca0f4d..37d61b0b268 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -211,15 +211,6 @@ def create_dirkey(module, gs, bucket, obj): except gs.provider.storage_response_error, e: module.fail_json(msg= str(e)) -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", 
failed=True) - return file_exists - def path_check(path): if os.path.exists(path): return True From eb17b6a36c029b92f069b61f38c4bddd848f1be2 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Tue, 25 Aug 2015 19:15:33 +0300 Subject: [PATCH 349/386] apt: check for "0 upgraded" to be at the beginning of the line Fixes #1678. --- packaging/os/apt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 packaging/os/apt.py diff --git a/packaging/os/apt.py b/packaging/os/apt.py old mode 100644 new mode 100755 index 92b0f2fb8fd..1fd770f710e --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -179,8 +179,8 @@ APT_ENV_VARS = dict( ) DPKG_OPTIONS = 'force-confdef,force-confold' -APT_GET_ZERO = "0 upgraded, 0 newly installed" -APTITUDE_ZERO = "0 packages upgraded, 0 newly installed" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed" APT_LISTS_PATH = "/var/lib/apt/lists" APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" From 4ae4331a6bfe716e65aec656bff0c51a78b02a40 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Wed, 26 Aug 2015 08:51:52 +0300 Subject: [PATCH 350/386] user: don't generate SSH keys in check mode Fixes https://github.com/ansible/ansible/issues/11768 Test plan: - (in a Vagrant VM) created a user 'bob' with no ssh key - ran the following playbook in check mode: --- - hosts: trusty tasks: - user: name=bob state=present generate_ssh_key=yes - saw that ansible-playbook reported "changes=1" - saw that /home/bob/.ssh was still absent - ran the playbook for real - saw that /home/bob/.ssh was created - ran the playbook in check mode again - saw that ansible-playbook reported no changes - tried a variation with a different username for a user that didn't exist: ansible-playbook --check worked correctly (no errors, reported "changed") --- system/user.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) mode change 100644 => 100755 
system/user.py diff --git a/system/user.py b/system/user.py old mode 100644 new mode 100755 index 7e3e4c01cd3..45ce77381ce --- a/system/user.py +++ b/system/user.py @@ -577,11 +577,13 @@ class User(object): def ssh_key_gen(self): info = self.user_info() - if not os.path.exists(info[5]): + if not os.path.exists(info[5]) and not self.module.check_mode: return (1, '', 'User %s home directory does not exist' % self.name) ssh_key_file = self.get_ssh_key_path() ssh_dir = os.path.dirname(ssh_key_file) if not os.path.exists(ssh_dir): + if self.module.check_mode: + return (0, '', '') try: os.mkdir(ssh_dir, 0700) os.chown(ssh_dir, info[2], info[3]) @@ -589,6 +591,8 @@ class User(object): return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) if os.path.exists(ssh_key_file): return (None, 'Key already exists', '') + if self.module.check_mode: + return (0, '', '') cmd = [self.module.get_bin_path('ssh-keygen', True)] cmd.append('-t') cmd.append(self.ssh_type) @@ -2148,6 +2152,7 @@ def main(): # deal with ssh key if user.sshkeygen: + # generate ssh key (note: this function is check mode aware) (rc, out, err) = user.ssh_key_gen() if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) From 0e42b1708bdfaa4cc5f192c8f68d0d0adb2b03c4 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Wed, 26 Aug 2015 11:53:39 -0400 Subject: [PATCH 351/386] Fixed call to undefined attribute when RDS module timeouts waiting. 
--- cloud/amazon/rds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 9e98f50230b..1755be9b1a1 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -610,7 +610,7 @@ def await_resource(conn, resource, status, module): while wait_timeout > time.time() and resource.status != status: time.sleep(5) if wait_timeout <= time.time(): - module.fail_json(msg="Timeout waiting for resource %s" % resource.id) + module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name) if module.params.get('command') == 'snapshot': # Temporary until all the rds2 commands have their responses parsed if resource.name is None: From 2ba32a8b1048f01e67cad68304440df25ca55975 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Wed, 26 Aug 2015 11:52:15 -0400 Subject: [PATCH 352/386] Clarified and cleaned up grammar of error messages. --- cloud/amazon/rds.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 1755be9b1a1..d56c4ae12de 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -614,12 +614,12 @@ def await_resource(conn, resource, status, module): if module.params.get('command') == 'snapshot': # Temporary until all the rds2 commands have their responses parsed if resource.name is None: - module.fail_json(msg="Problem with snapshot %s" % resource.snapshot) + module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) resource = conn.get_db_snapshot(resource.name) else: # Temporary until all the rds2 commands have their responses parsed if resource.name is None: - module.fail_json(msg="Problem with instance %s" % resource.instance) + module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) resource = conn.get_db_instance(resource.name) if resource is None: break @@ -653,7 +653,7 @@ def create_db_instance(module, conn): module.params.get('username'), 
module.params.get('password'), **params) changed = True except RDSException, e: - module.fail_json(msg="failed to create instance: %s" % e.message) + module.fail_json(msg="Failed to create instance: %s" % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -680,7 +680,7 @@ def replicate_db_instance(module, conn): result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True except RDSException, e: - module.fail_json(msg="failed to create replica instance: %s " % e.message) + module.fail_json(msg="Failed to create replica instance: %s " % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -719,7 +719,7 @@ def delete_db_instance_or_snapshot(module, conn): else: result = conn.delete_db_snapshot(snapshot) except RDSException, e: - module.fail_json(msg="failed to delete instance: %s" % e.message) + module.fail_json(msg="Failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done # so just return @@ -745,11 +745,11 @@ def facts_db_instance_or_snapshot(module, conn): snapshot = module.params.get('snapshot') if instance_name and snapshot: - module.fail_json(msg="facts must be called with either instance_name or snapshot, not both") + module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") if instance_name: resource = conn.get_db_instance(instance_name) if not resource: - module.fail_json(msg="DB Instance %s does not exist" % instance_name) + module.fail_json(msg="DB instance %s does not exist" % instance_name) if snapshot: resource = conn.get_db_snapshot(snapshot) if not resource: @@ -1037,7 +1037,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) if not region: - module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.") + module.fail_json(msg="Region not specified. 
Unable to determine region from EC2_REGION.") # connect to the rds endpoint if has_rds2: From a4aa29edd96e406385697525c883dbb399b18517 Mon Sep 17 00:00:00 2001 From: Luke Date: Fri, 28 Aug 2015 08:34:05 -0400 Subject: [PATCH 353/386] updated examples When testing with existing example, I received errors about auth block not being ingested. After adding the auth block, name and state with "=" caused syntax errors. --- cloud/openstack/os_network.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index 75c431493f6..f911ce71af1 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -57,8 +57,13 @@ requirements: ["shade"] EXAMPLES = ''' - os_network: - name=t1network - state=present + name: t1network + state: present + auth: + auth_url: https://your_api_url.com:9000/v2.0 + username: user + password: password + project_name: someproject ''' From 40f2ff9fbf71b7a4330b5ddb56a207dec05d5dbf Mon Sep 17 00:00:00 2001 From: Luke Date: Fri, 28 Aug 2015 08:46:45 -0400 Subject: [PATCH 354/386] removed hyphens in module name in examples --- cloud/openstack/os_client_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 7128b06ffcb..67c58dfd6ca 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -40,12 +40,12 @@ author: "Monty Taylor (@emonty)" EXAMPLES = ''' # Get list of clouds that do not support security groups -- os-client-config: +- os_client_config: - debug: var={{ item }} with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" # Get the information back just about the mordred cloud -- os-client-config: +- os_client_config: clouds: - mordred ''' From 48f522455da3a7232aa2f1c5cc3d71a11d243860 Mon Sep 17 00:00:00 2001 From: Tim Rupp Date: Fri, 28 Aug 2015 20:35:24 -0700 Subject: [PATCH 355/386] 
Add hostname support for Kali linux 2.0 This patch allows the hostname module to detect and set the hostname for a Kali Linux 2.0 installation. Without this patch, the hostname module raises the following error hostname module cannot be used on platform Linux (Kali) Kali is based off of Debian. --- system/hostname.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index f986a91f8f3..9e7f6a4ef70 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -491,6 +491,11 @@ class DebianHostname(Hostname): distribution = 'Debian' strategy_class = DebianStrategy +class KaliHostname(Hostname): + platform = 'Linux' + distribution = 'Kali' + strategy_class = DebianStrategy + class UbuntuHostname(Hostname): platform = 'Linux' distribution = 'Ubuntu' From 5595a9f81d07f840438f52c0560726c87165f622 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Mon, 31 Aug 2015 09:08:35 +0300 Subject: [PATCH 356/386] authorized_key: fix example in documentation 'key=' cannot be pointing to a file name; it needs to be the key itself as a string (or a URL). --- system/authorized_key.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 376cf4c61dc..361e68cb009 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -112,8 +112,10 @@ EXAMPLES = ''' key_options='no-port-forwarding,from="10.0.1.1"' # Set up authorized_keys exclusively with one key -- authorized_key: user=root key=public_keys/doe-jane state=present +- authorized_key: user=root key="{{ item }}" state=present exclusive=yes + with_file: + - public_keys/doe-jane ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. 
From a18c96882e2713909206c82afc2e7754b206c35d Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Mon, 31 Aug 2015 09:06:18 -0700 Subject: [PATCH 357/386] using single device_id and enabling release on disassociation --- cloud/amazon/ec2_eip.py | 75 +++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index ae3cd06eaa7..5d6532b3955 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -22,14 +22,11 @@ description: - This module associates AWS EC2 elastic IP addresses with instances version_added: "1.4" options: - instance_id: + device_id: description: - - The EC2 instance id - required: false - network_interface_id: - description: - - The Elastic Network Interface (ENI) id + - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. required: false + aliases: [ instance_id ] version_added: "2.0" public_ip: description: @@ -61,8 +58,15 @@ options: required: false default: false version_added: "1.6" + release_on_disassociation: + description: + - whether or not to automatically release the EIP when it is disassociated + required: false + default: false + version_added: "2.0" extends_documentation_fragment: aws author: "Lorin Hochstein (@lorin) " +author: "Rick Mendes (@rickmendes) " notes: - This module will return C(public_ip) on success, which will contain the public IP address associated with the instance. @@ -70,19 +74,22 @@ notes: the cloud instance is reachable via the new address. Use wait_for and pause to delay further playbook execution until the instance is reachable, if necessary. + - This module returns multiple changed statuses on disassociation or release. + It returns an overall status based on any changes occuring. It also returns + individual changed statuses for disassociation and release. 
''' EXAMPLES = ''' - name: associate an elastic IP with an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 + ec2_eip: device_id=i-1212f003 ip=93.184.216.119 - name: associate an elastic IP with a device - ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 + ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 - name: disassociate an elastic IP from an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent + ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent - name: disassociate an elastic IP with a device - ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 state=absent + ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent - name: allocate a new elastic IP and associate it with an instance - ec2_eip: instance_id=i-1212f003 + ec2_eip: device_id=i-1212f003 - name: allocate a new elastic IP without associating it to anything action: ec2_eip register: eip @@ -95,7 +102,7 @@ EXAMPLES = ''' ''' group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances - ec2_eip: "instance_id={{ item }}" + ec2_eip: "device_id={{ item }}" with_items: ec2.instance_ids - name: allocate a new elastic IP inside a VPC in us-west-2 ec2_eip: region=us-west-2 in_vpc=yes @@ -292,14 +299,14 @@ def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - instance_id=dict(required=False), - network_interface_id=dict(required=False), + device_id=dict(required=False, aliases=['instance_id']), public_ip=dict(required=False, aliases=['ip']), state=dict(required=False, default='present', choices=['present', 'absent']), in_vpc=dict(required=False, type='bool', default=False), reuse_existing_ip_allowed=dict(required=False, type='bool', default=False), + release_on_disassociation=dict(required=False, type='bool', default=False), wait_timeout=dict(default=300), )) @@ -313,42 +320,46 @@ def main(): ec2 = 
ec2_connect(module) - instance_id = module.params.get('instance_id') - network_interface_id = module.params.get('network_interface_id') + device_id = module.params.get('device_id') public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') + release_on_disassociation = module.params.get('release_on_disassociation') + + if device_id and device_id.startswith('i-'): + is_instance=True + elif device_id: + is_instance=False try: - if network_interface_id: - address = find_address(ec2, public_ip, network_interface_id, isinstance=False) - elif instance_id: - address = find_address(ec2, public_ip, instance_id) + if device_id: + address = find_address(ec2, public_ip, device_id, isinstance=is_instance) else: address = False if state == 'present': - if instance_id: - result = ensure_present(ec2, domain, address, instance_id, + if device_id: + result = ensure_present(ec2, domain, address, device_id, reuse_existing_ip_allowed, - module.check_mode) - elif network_interface_id: - result = ensure_present(ec2, domain, address, network_interface_id, - reuse_existing_ip_allowed, - module.check_mode, isinstance=False) + module.check_mode, isinstance=is_instance) else: address = allocate_address(ec2, domain, reuse_existing_ip_allowed) result = {'changed': True, 'public_ip': address.public_ip} else: - if network_interface_id: - result = ensure_absent(ec2, domain, address, network_interface_id, module.check_mode, isinstance=False) - elif instance_id: - result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + if device_id: + disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance) + + if release_on_disassociation and disassociated['changed']: + released = release_address(ec2, address, module.check_mode) + result = { 'changed': True, 'disassociated': 
disassociated, 'released': released } + else: + result = { 'changed': disassociated['changed'], 'disassociated': disassociated, 'released': { 'changed': False } } else: address = find_address(ec2, public_ip, None) - result = release_address(ec2, address, module.check_mode) + released = release_address(ec2, address, module.check_mode) + result = { 'changed': released['changed'], 'disassociated': { 'changed': False }, 'released': released } except (boto.exception.EC2ResponseError, EIPException) as e: module.fail_json(msg=str(e)) From 06fc029f73ccd323e355424c016bbcbbc2fb8c80 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 31 Aug 2015 15:09:50 -0700 Subject: [PATCH 358/386] Remove non-ascii quote char --- cloud/amazon/ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index e0d7d2c1a64..fa6c64490ad 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -124,7 +124,7 @@ options: version_added: "2.0" classic_link_vpc_security_groups: description: - - A list of security group id’s with which to associate the ClassicLink VPC instances. + - A list of security group id's with which to associate the ClassicLink VPC instances. 
required: false default: null version_added: "2.0" From 880dc307b018f2f741aad3d728caa2c0ee99d8d5 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Tue, 1 Sep 2015 10:07:03 -0400 Subject: [PATCH 359/386] Remove the sample SHA256 putting a full sha256 made the width of the table in the generated docs not fit on some screens --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index db3c73b2cd7..0212e41a7db 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -81,7 +81,7 @@ options: - 'If a checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. - Format: :, e.g.: checksum="sha256:d98291acbedd510e3dbd36dbfdd83cbca8415220af43b327c0a0c574b6dc7b97" + Format: :, e.g.: checksum="sha256:shagoeshere" If you worry about portability, only the sha1 algorithm is available on all platforms and python versions. The third party hashlib library can be installed for access to additional algorithms.' From 68ab025dac8ea0f9779f57fde2236bac6ce95084 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 19 Aug 2015 11:18:15 -0400 Subject: [PATCH 360/386] minor doc fixes --- files/assemble.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/files/assemble.py b/files/assemble.py index 73d4214eb9e..a996fe44084 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -91,9 +91,11 @@ options: validate is passed in via '%s' which must be present as in the sshd example below. The command is passed securely so shell features like expansion and pipes won't work. 
required: false - default: "" + default: null + version_added: "2.0" author: "Stephen Fromm (@sfromm)" -extends_documentation_fragment: files +extends_documentation_fragment: + - files ''' EXAMPLES = ''' @@ -104,7 +106,7 @@ EXAMPLES = ''' - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' # Copy a new "sshd_config" file into place, after passing validation with sshd -- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='sshd -t -f %s' +- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s' ''' # =========================================== From bbcfb1092ae22e8520f6241b9da7f99a4f7423cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 1 Sep 2015 10:57:37 -0400 Subject: [PATCH 361/386] check systemctl status before show as show will not return antyhing other than rc=0 even when it fails. --- system/service.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 8495bec9e24..70ff83517fd 100644 --- a/system/service.py +++ b/system/service.py @@ -520,7 +520,13 @@ class LinuxService(Service): return False def get_systemd_status_dict(self): - (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) + + # Check status first as show will not fail if service does not exist + (rc, out, err) = self.execute_command("%s status '%s'" % (self.enable_cmd, self.__systemd_unit,)) + if rc != 0: + self.module.fail_json(msg='failure %d running systemctl status for %r: %s' % (rc, self.__systemd_unit, err)) + + (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) key = None From 3830af652f73b8d896dd0a1d670ebc33e65489b0 Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Tue, 1 Sep 2015 10:47:43 -0500 Subject: [PATCH 
362/386] Revert "add virtual floppy to VMware guest" --- cloud/vmware/vsphere_guest.py | 62 ----------------------------------- 1 file changed, 62 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index c152491a8aa..701df22dfba 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -185,9 +185,6 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" - vm_floppy: - type: "image" - image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -379,44 +376,6 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) -def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): - # Add a floppy - # Make sure the datastore exists. - if vm_floppy_image_path: - image_location = vm_floppy_image_path.split('/', 1) - datastore, ds = find_datastore( - module, s, image_location[0], config_target) - image_path = image_location[1] - - floppy_spec = config.new_deviceChange() - floppy_spec.set_element_operation('add') - floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() - - if type == "image": - image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() - ds_ref = image.new_datastore(ds) - ds_ref.set_attribute_type(ds.get_attribute_type()) - image.set_element_datastore(ds_ref) - image.set_element_fileName("%s %s" % (datastore, image_path)) - floppy_ctrl.set_element_backing(image) - floppy_ctrl.set_element_key(3) - floppy_spec.set_element_device(floppy_ctrl) - elif type == "client": - client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( - "client").pyclass() - client.set_element_deviceName("/dev/fd0") - floppy_ctrl.set_element_backing(client) - floppy_ctrl.set_element_key(3) - floppy_spec.set_element_device(floppy_ctrl) - else: - s.disconnect() - module.fail_json( - msg="Error adding floppy of type %s to vm spec. 
" - " floppy type can either be image or client" % (type)) - - devices.append(floppy_spec) - - def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -987,27 +946,6 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) - if 'vm_floppy' in vm_hardware: - floppy_image_path = None - floppy_type = None - try: - floppy_type = vm_hardware['vm_floppy']['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. floppy type needs to be" - " specified." % vm_hardware['vm_floppy']) - if floppy_type == 'image': - try: - floppy_image_path = vm_hardware['vm_floppy']['image_path'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. floppy image_path needs" - " to be specified." % vm_hardware['vm_floppy']) - # Add a floppy to the VM. 
- add_floppy(module, vsphere_client, config_target, config, devices, - default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From 8ebd6cc7cdab4ac4db2bcfd21c58564a3529d466 Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Tue, 1 Sep 2015 10:55:10 -0500 Subject: [PATCH 363/386] Revert "Add 2.0 docs - Revert "add virtual floppy to VMware guest"" --- cloud/vmware/vsphere_guest.py | 62 +++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 701df22dfba..c152491a8aa 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -185,6 +185,9 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" + vm_floppy: + type: "image" + image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -376,6 +379,44 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) +def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): + # Add a floppy + # Make sure the datastore exists. 
+ if vm_floppy_image_path: + image_location = vm_floppy_image_path.split('/', 1) + datastore, ds = find_datastore( + module, s, image_location[0], config_target) + image_path = image_location[1] + + floppy_spec = config.new_deviceChange() + floppy_spec.set_element_operation('add') + floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() + + if type == "image": + image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() + ds_ref = image.new_datastore(ds) + ds_ref.set_attribute_type(ds.get_attribute_type()) + image.set_element_datastore(ds_ref) + image.set_element_fileName("%s %s" % (datastore, image_path)) + floppy_ctrl.set_element_backing(image) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + elif type == "client": + client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( + "client").pyclass() + client.set_element_deviceName("/dev/fd0") + floppy_ctrl.set_element_backing(client) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + else: + s.disconnect() + module.fail_json( + msg="Error adding floppy of type %s to vm spec. " + " floppy type can either be image or client" % (type)) + + devices.append(floppy_spec) + + def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -946,6 +987,27 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) + if 'vm_floppy' in vm_hardware: + floppy_image_path = None + floppy_type = None + try: + floppy_type = vm_hardware['vm_floppy']['type'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy type needs to be" + " specified." 
% vm_hardware['vm_floppy']) + if floppy_type == 'image': + try: + floppy_image_path = vm_hardware['vm_floppy']['image_path'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy image_path needs" + " to be specified." % vm_hardware['vm_floppy']) + # Add a floppy to the VM. + add_floppy(module, vsphere_client, config_target, config, devices, + default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From 6a40e8b4545822bdaf0e33c44e680d53bd8f7175 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Tue, 1 Sep 2015 14:53:11 -0500 Subject: [PATCH 364/386] vm_floppy 2.0 support --- cloud/vmware/vsphere_guest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index c152491a8aa..41da954ac32 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -152,6 +152,7 @@ EXAMPLES = ''' # Returns changed = True and a adds ansible_facts from the new VM # State will set the power status of a guest upon creation. Use powered_on to create and boot. # Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together +# Note: vm_floppy support added in 2.0 - vsphere_guest: vcenter_hostname: vcenter.mydomain.local From c54f875fdd7f26a2fce6cd2986b096a283452b2e Mon Sep 17 00:00:00 2001 From: James Martin Date: Wed, 2 Sep 2015 10:18:38 -0500 Subject: [PATCH 365/386] Adds sanity check to make sure nics is a list. 
--- cloud/openstack/os_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 959f39880f8..90cc7282d04 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -392,7 +392,7 @@ def main(): flavor_include = dict(default=None), key_name = dict(default=None), security_groups = dict(default='default'), - nics = dict(default=[]), + nics = dict(default=[], type='list'), meta = dict(default=None), userdata = dict(default=None), config_drive = dict(default=False, type='bool'), From e278f285aa6f61e45416be28b1e689b4d7607196 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Sep 2015 17:09:53 -0400 Subject: [PATCH 366/386] partially reverted previous change to deal with systemctl show status not returning errors on missing service Now it looks for not-found key instead of running status which does return error codes when service is present but in diff states. fixes #12216 --- system/service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/system/service.py b/system/service.py index 70ff83517fd..4255ecb83ab 100644 --- a/system/service.py +++ b/system/service.py @@ -522,13 +522,12 @@ class LinuxService(Service): def get_systemd_status_dict(self): # Check status first as show will not fail if service does not exist - (rc, out, err) = self.execute_command("%s status '%s'" % (self.enable_cmd, self.__systemd_unit,)) - if rc != 0: - self.module.fail_json(msg='failure %d running systemctl status for %r: %s' % (rc, self.__systemd_unit, err)) - (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) + elif 'LoadState=not-found' in out: + self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err)) + key = None value_buffer = [] status_dict = 
{} From 2520627fe7d487a08e077a01bb1251f3757f0515 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Sep 2015 08:46:35 -0700 Subject: [PATCH 367/386] Make sure listener ports are ints. May fix #1984 --- cloud/amazon/ec2_elb_lb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 3d54f994436..8c739e1a2b2 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -573,8 +573,8 @@ class ElbManager(object): # N.B. string manipulations on protocols below (str(), upper()) is to # ensure format matches output from ELB API listener_list = [ - listener['load_balancer_port'], - listener['instance_port'], + int(listener['load_balancer_port']), + int(listener['instance_port']), str(listener['protocol'].upper()), ] From 1f358f349b73e008801f32cf046b5533abbefc5c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Sep 2015 15:39:18 -0700 Subject: [PATCH 368/386] We had two separate methods trying to do the same thing but neither one was complete. This merges them so that all of the options get parsed and applied. 
--- cloud/docker/docker.py | 89 ++++++++++++------------------------------ 1 file changed, 26 insertions(+), 63 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 82c39006678..99ede1b564f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -801,7 +801,8 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -818,13 +819,35 @@ class DockerManager(object): if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] + if optionals['extra_hosts'] is not None: + self.ensure_capability('extra_hosts') + params['extra_hosts'] = optionals['extra_hosts'] + + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + return params - def get_host_config(self): + def create_host_config(self): """ Create HostConfig object """ @@ -1340,65 +1363,6 @@ class DockerManager(object): except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) - def create_host_config(self): - params = { - 'lxc_conf': 
self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - - optionals = {} - for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop'): - optionals[optional_param] = self.module.params.get(optional_param) - - if optionals['dns'] is not None: - self.ensure_capability('dns') - params['dns'] = optionals['dns'] - - if optionals['volumes_from'] is not None: - self.ensure_capability('volumes_from') - params['volumes_from'] = optionals['volumes_from'] - - if optionals['restart_policy'] is not None: - self.ensure_capability('restart_policy') - params['restart_policy'] = { 'Name': optionals['restart_policy'] } - if params['restart_policy']['Name'] == 'on-failure': - params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] - - # docker_py only accepts 'host' or None - if 'pid' in optionals and not optionals['pid']: - optionals['pid'] = None - - if optionals['pid'] is not None: - self.ensure_capability('pid') - params['pid_mode'] = optionals['pid'] - - if optionals['extra_hosts'] is not None: - self.ensure_capability('extra_hosts') - params['extra_hosts'] = optionals['extra_hosts'] - - if optionals['log_driver'] is not None: - self.ensure_capability('log_driver') - log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) - log_config.type = optionals['log_driver'] - params['log_config'] = log_config - - if optionals['cap_add'] is not None: - self.ensure_capability('cap_add') - params['cap_add'] = optionals['cap_add'] - - if optionals['cap_drop'] is not None: - self.ensure_capability('cap_drop') - params['cap_drop'] = optionals['cap_drop'] - - return docker.utils.create_host_config(**params) - def create_containers(self, 
count=1): try: mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) @@ -1418,11 +1382,10 @@ class DockerManager(object): 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), 'cpuset': self.module.params.get('cpu_set'), - 'host_config': self.create_host_config(), 'user': self.module.params.get('docker_user'), } if self.ensure_capability('host_config', fail=False): - params['host_config'] = self.get_host_config() + params['host_config'] = self.create_host_config() #For v1.19 API and above use HostConfig, otherwise use Config if api_version < 1.19: From 6e5a832dc28e1de72f296f7fe2b9bda294bc5b50 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Sep 2015 15:59:19 -0700 Subject: [PATCH 369/386] Fix read-only usage to depend on the docker-py and docker server version --- cloud/docker/docker.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 99ede1b564f..f236f1b52fb 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -268,7 +268,7 @@ options: read_only: description: - Mount the container's root filesystem as read only - default: false + default: null aliases: [] version_added: "2.0" restart_policy: @@ -796,13 +796,12 @@ class DockerManager(object): 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), - 'read_only': self.module.params.get('read_only'), } optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop'): + 'cap_add', 'cap_drop', 'read_only'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -845,6 +844,10 @@ class DockerManager(object): self.ensure_capability('cap_drop') params['cap_drop'] = optionals['cap_drop'] + if optionals['read_only'] is not None: + 
self.ensure_capability('read_only') + params['read_only'] = optionals['read_only'] + return params def create_host_config(self): @@ -1627,7 +1630,7 @@ def main(): cpu_set = dict(default=None), cap_add = dict(default=None, type='list'), cap_drop = dict(default=None, type='list'), - read_only = dict(default=False, type='bool'), + read_only = dict(default=None, type='bool'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From eaa45dcbd9e0deef3b78145afa86d7fe7c8500fb Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Fri, 4 Sep 2015 19:44:35 -0400 Subject: [PATCH 370/386] Add network_interfaces parameter to ec2 module to support launch-time ENIs --- cloud/amazon/ec2.py | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 9ad64c8e9fb..1b97d908b4a 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -240,6 +240,13 @@ options: required: false default: null aliases: [] + network_interfaces: + version_added: "2.0" + description: + - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.) 
+ required: false + default: null + aliases: [] author: - "Tim Gerla (@tgerla)" @@ -826,11 +833,21 @@ def create_instances(module, ec2, vpc, override_count=None): count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) termination_protection = module.boolean(module.params.get('termination_protection')) + network_interfaces = module.params.get('network_interfaces') # group_id and group_name are exclusive of each other if group_id and group_name: module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) + if (network_interfaces and + (assign_public_ip or private_ip or vpc_subnet_id + or group_name or group_id)): + module.fail_json( + msg=str("network_interfaces must not be set when specifying " + + "assign_public_ip, private_ip, vpc_subnet_id, group, " + + "or group_id, which are used to create a new network " + + "interface.")) + vpc_id = None if vpc_subnet_id: if not vpc: @@ -926,11 +943,21 @@ def create_instances(module, ec2, vpc, override_count=None): interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) params['network_interfaces'] = interfaces else: - params['subnet_id'] = vpc_subnet_id - if vpc_subnet_id: - params['security_group_ids'] = group_id + if network_interfaces: + interfaces = [] + for i, network_interface_id in enumerate(network_interfaces): + interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( + network_interface_id=network_interface_id, + device_index=i) + interfaces.append(interface) + params['network_interfaces'] = \ + boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) else: - params['security_groups'] = group_name + params['subnet_id'] = vpc_subnet_id + if vpc_subnet_id: + params['security_group_ids'] = group_id + else: + params['security_groups'] = group_name if volumes: bdm = BlockDeviceMapping() @@ -1284,6 +1311,7 @@ def main(): volumes = dict(type='list'), ebs_optimized = dict(type='bool', 
default=False), tenancy = dict(default='default'), + network_interfaces = dict(type='list') ) ) From 2ae37e7845c6acfb03cd0dadf2225bfaf82dc8e6 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Fri, 4 Sep 2015 20:46:26 -0400 Subject: [PATCH 371/386] os_server: nice error when flavor not found When we can't find the VM flavor that the user requests, this change replaces the non-descript stack trace with a clear error message. --- cloud/openstack/os_server.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 90cc7282d04..1fe1a7b65a3 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -287,8 +287,12 @@ def _create_server(module, cloud): if flavor: flavor_dict = cloud.get_flavor(flavor) + if not flavor_dict: + module.fail_json(msg="Could not find flavor %s" % flavor) else: flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include) + if not flavor_dict: + module.fail_json(msg="Could not find any matching flavor") nics = _network_args(module, cloud) From 34655e8e29c089e33cd505198e1356b893dfff32 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 5 Sep 2015 08:56:02 -0700 Subject: [PATCH 372/386] correct documentation formatting --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index e67d2a07d39..db6cd061480 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -130,7 +130,7 @@ options: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. required: false - default: Default. E.g.: When used to create a new autoscaling group, the “Default” value is used. When used to change an existent autoscaling group, the current termination policies are mantained + default: Default. Eg, when used to create a new autoscaling group, the “Default” value is used. 
When used to change an existent autoscaling group, the current termination policies are mantained choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" extends_documentation_fragment: aws From f1efc121eeb295f90973f5148290d505beb91306 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 6 Sep 2015 11:18:30 -0400 Subject: [PATCH 373/386] corrected whitepace --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index db6cd061480..4944674a55c 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -111,7 +111,7 @@ options: choices: ['EC2', 'ELB'] default_cooldown: description: - - The number of seconds after a scaling activity completes before another can begin. + - The number of seconds after a scaling activity completes before another can begin. required: false default: 300 seconds version_added: "2.0" From a20a78a50c07214565b6e3859e9b6531d9ed6d0c Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Sun, 6 Sep 2015 13:03:04 -0400 Subject: [PATCH 374/386] Remove aliases specification from documentation since there are none --- cloud/amazon/ec2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 1b97d908b4a..3b2e59577d6 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -246,7 +246,6 @@ options: - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.) 
required: false default: null - aliases: [] author: - "Tim Gerla (@tgerla)" From 5db3f14e94e88597d9f2f058a57b4512775d41eb Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Sun, 6 Sep 2015 13:18:20 -0400 Subject: [PATCH 375/386] Add network_interfaces example --- cloud/amazon/ec2.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 3b2e59577d6..420fac28f5f 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -358,6 +358,13 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Example using pre-existing network interfaces +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e'] + # Launch instances, runs some tasks # and then terminate them From 44f3618dd3c06ef6b1af2b196c46a2d7575c35f8 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Sun, 6 Sep 2015 13:53:28 -0400 Subject: [PATCH 376/386] Add alias `network_interface` and accept a string for a single ENI --- cloud/amazon/ec2.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 420fac28f5f..bd51bcd7ed3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -246,6 +246,7 @@ options: - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.) 
required: false default: null + aliases: ['network_interface'] author: - "Tim Gerla (@tgerla)" @@ -358,7 +359,13 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes -# Example using pre-existing network interfaces +# Examples using pre-existing network interfaces +- ec2: + key_name: mykey + instance_type: t2.small + image: ami-f005ba11 + network_interface: eni-deadbeef + - ec2: key_name: mykey instance_type: t2.small @@ -950,6 +957,8 @@ def create_instances(module, ec2, vpc, override_count=None): params['network_interfaces'] = interfaces else: if network_interfaces: + if isinstance(network_interfaces, basestring): + network_interfaces = [network_interfaces] interfaces = [] for i, network_interface_id in enumerate(network_interfaces): interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( @@ -1317,7 +1326,7 @@ def main(): volumes = dict(type='list'), ebs_optimized = dict(type='bool', default=False), tenancy = dict(default='default'), - network_interfaces = dict(type='list') + network_interfaces = dict(type='list', aliases=['network_interface']) ) ) From 27e9318ffea622136d34a8c7832312b46d6e6aaa Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Sun, 6 Sep 2015 14:00:35 -0400 Subject: [PATCH 377/386] Use general-purpose parameter mutual exclusion code for network_interfaces --- cloud/amazon/ec2.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index bd51bcd7ed3..14b278e76ba 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -852,15 +852,6 @@ def create_instances(module, ec2, vpc, override_count=None): if group_id and group_name: module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) - if (network_interfaces and - (assign_public_ip or private_ip or vpc_subnet_id - or group_name or group_id)): - module.fail_json( - msg=str("network_interfaces must not be set when specifying " + - "assign_public_ip, private_ip, 
vpc_subnet_id, group, " + - "or group_id, which are used to create a new network " + - "interface.")) - vpc_id = None if vpc_subnet_id: if not vpc: @@ -1335,7 +1326,12 @@ def main(): mutually_exclusive = [ ['exact_count', 'count'], ['exact_count', 'state'], - ['exact_count', 'instance_ids'] + ['exact_count', 'instance_ids'], + ['network_interfaces', 'assign_public_ip'], + ['network_interfaces', 'group'], + ['network_interfaces', 'group_id'], + ['network_interfaces', 'private_ip'], + ['network_interfaces', 'vpc_subnet_id'], ], ) From a0a374b8cad5da9af0aca4ac24e212318267ce14 Mon Sep 17 00:00:00 2001 From: David Dyball Date: Tue, 8 Sep 2015 16:19:26 +0100 Subject: [PATCH 378/386] Updated PR based on comments --- cloud/docker/docker.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f236f1b52fb..0ab564208ba 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -109,6 +109,14 @@ options: - none - syslog version_added: "2.0" + log_opt: + description: + - Additional options to pass to the logging driver selected above. See Docker log-driver + documentation for more information (https://docs.docker.com/reference/logging/overview/). + Requires docker >=1.7.0. 
+ required: false + default: null + version_added: "2.0" memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable @@ -415,6 +423,19 @@ EXAMPLES = ''' name: ohno image: someuser/oldandbusted state: absent + +# Example Syslogging Output + +- name: myservice container + docker: + name: myservice + image: someservice/someimage + state: reloaded + log_driver: syslog + log_opt: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + syslog-tag: myservice ''' HAS_DOCKER_PY = True @@ -570,6 +591,7 @@ class DockerManager(object): 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), 'log_driver': ((1, 2, 0), '1.18'), + 'log_opt': ((1, 2, 0), '1.18'), 'host_config': ((0, 7, 0), '1.15'), 'cpu_set': ((0, 6, 0), '1.14'), 'cap_add': ((0, 5, 0), '1.14'), @@ -801,7 +823,7 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop', 'read_only'): + 'cap_add', 'cap_drop', 'read_only', 'log_opt'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -833,6 +855,9 @@ class DockerManager(object): if optionals['log_driver'] is not None: self.ensure_capability('log_driver') log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + if optionals['log_opt'] is not None: + for k, v in optionals['log_opt'].iteritems(): + log_config.set_config_value(k, v) log_config.type = optionals['log_driver'] params['log_config'] = log_config @@ -1262,7 +1287,7 @@ class DockerManager(object): # LOG_DRIVER - if self.ensure_capability('log_driver', False) : + if self.ensure_capability('log_driver', False): expected_log_driver = self.module.params.get('log_driver') or 'json-file' actual_log_driver = container['HostConfig']['LogConfig']['Type'] if actual_log_driver != expected_log_driver: @@ -1270,6 +1295,17 @@ class 
DockerManager(object): differing.append(container) continue + if self.ensure_capability('log_opt', False): + expected_logging_opts = self.module.params.get('log_opt') or {} + actual_log_opts = container['HostConfig']['LogConfig']['Config'] + if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0: + log_opt_reasons = { + 'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())), + 'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items())) + } + self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons)) + differing.append(container) + return differing def get_deployed_containers(self): @@ -1627,6 +1663,7 @@ def main(): pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), + log_opt = dict(default=None, type='dict'), cpu_set = dict(default=None), cap_add = dict(default=None, type='list'), cap_drop = dict(default=None, type='list'), From ab509438874ca89f044c78fb53c7f6ebcb6c292c Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Tue, 8 Sep 2015 12:00:21 -0400 Subject: [PATCH 379/386] Update get_url.py use an abbreviated sha --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 0212e41a7db..0c100dd0187 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -81,7 +81,7 @@ options: - 'If a checksum is passed to this parameter, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. - Format: :, e.g.: checksum="sha256:shagoeshere" + Format: :, e.g.: checksum="sha256:D98291AC[...]B6DC7B97" If you worry about portability, only the sha1 algorithm is available on all platforms and python versions. The third party hashlib library can be installed for access to additional algorithms.' 
From 7104096c7cba6ec1052f5f6a96056331a848e763 Mon Sep 17 00:00:00 2001 From: Yanchek99 Date: Tue, 8 Sep 2015 09:16:53 -0700 Subject: [PATCH 380/386] Fixed typo for instance_tags documentation --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 9ad64c8e9fb..e9816366921 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -144,7 +144,7 @@ options: instance_tags: version_added: "1.0" description: - - a hash/dictionary of tags to add to the new instance or for for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}' + - a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null aliases: [] From 15f4b596192afb2cf172e4ab140b4fa9eaff25e5 Mon Sep 17 00:00:00 2001 From: Taneli Lepp Date: Tue, 8 Sep 2015 13:00:37 +0300 Subject: [PATCH 381/386] Added option to set multiple ACLs for S3 objects. Also verifies the selected permissions against Boto's canned ACL strings list. --- cloud/amazon/s3.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index e82f20d98e6..64f53cc042a 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -95,7 +95,7 @@ options: default: null permission: description: - - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. + - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list. 
required: false default: private version_added: "2.0" @@ -198,6 +198,7 @@ try: from boto.s3.connection import Location from boto.s3.connection import OrdinaryCallingFormat from boto.s3.connection import S3Connection + from boto.s3.acl import CannedACLStrings HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -242,7 +243,8 @@ def create_bucket(module, s3, bucket, location=None): location = Location.DEFAULT try: bucket = s3.create_bucket(bucket, location=location) - bucket.set_acl(module.params.get('permission')) + for acl in module.params.get('permission'): + bucket.set_acl(acl) except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) if bucket: @@ -304,7 +306,8 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, heade key.set_metadata(meta_key, metadata[meta_key]) key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers) - key.set_acl(module.params.get('permission')) + for acl in module.params.get('permission'): + key.set_acl(acl) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: @@ -377,7 +380,7 @@ def main(): metadata = dict(type='dict'), mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), - permission = dict(choices=['private', 'public-read', 'public-read-write', 'authenticated-read'], default='private'), + permission = dict(type='list', default=['private']), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), prefix = dict(default=None), @@ -409,6 +412,10 @@ def main(): s3_url = module.params.get('s3_url') src = module.params.get('src') + for acl in module.params.get('permission'): + if acl not in CannedACLStrings: + module.fail_json(msg='Unknown permission specified: %s' % str(acl)) + if overwrite not in ['always', 'never', 'different']: if module.boolean(overwrite): overwrite = 'always' From 
2cc431f791a45954305a01dbd9f96536a4a80f96 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 9 Sep 2015 01:29:38 -0400 Subject: [PATCH 382/386] Fix indentation in os_server documentation --- cloud/openstack/os_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 1fe1a7b65a3..a0f97d6ecb8 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -137,7 +137,7 @@ options: - Boot instance from a volume required: false default: None - terminate_volume: + terminate_volume: description: - If true, delete volume when deleting instance (if booted from volume) default: false From 7982d582d972396b507f4d46508f30d3f1bd0ac6 Mon Sep 17 00:00:00 2001 From: Sam Mingo Date: Wed, 9 Sep 2015 17:38:05 -0400 Subject: [PATCH 383/386] Update get_url.py Fixed typo with header parameter to get_url in the documentation. --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 0c100dd0187..fad0d58f878 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -153,7 +153,7 @@ EXAMPLES=''' get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes - name: download file with custom HTTP headers - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers: 'key:value,key:value' + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers='key:value,key:value' - name: download file with check get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c From f449a0f48c02b7d22a64f9b563b1a593d8269118 Mon Sep 17 00:00:00 2001 From: Abitha Palaniappan Date: Wed, 9 Sep 2015 15:38:39 -0700 Subject: [PATCH 384/386] Add support for port-id,port-name to nics in os_server --- cloud/openstack/os_server.py | 9 +++++++++ 1 file changed, 9 
insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index a0f97d6ecb8..44481d643f4 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -262,6 +262,15 @@ def _network_args(module, cloud): msg='Could not find network by net-name: %s' % net['net-name']) args.append({'net-id': by_name['id']}) + elif net.get('port-id'): + args.append(net) + elif net.get('port-name'): + by_name = cloud.get_port(net['port-name']) + if not by_name: + module.fail_json( + msg='Could not find port by port-name: %s' % + net['port-name']) + args.append({'port-id': by_name['id']}) return args From 05c3b3ea152a20f7fd9cd91b135c4e1af0c66017 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 10 Sep 2015 09:07:38 -0400 Subject: [PATCH 385/386] minor doc fix --- cloud/amazon/ec2_asg.py | 5 +-- windows/win_lineinfile.py | 76 +++++++++++---------------------------- 2 files changed, 23 insertions(+), 58 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 4944674a55c..c78bf462a8a 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -67,7 +67,7 @@ options: - Number of instances you'd like to replace at a time. Used with replace_all_instances. required: false version_added: "1.8" - default: 1 + default: 1 replace_instances: description: - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration. @@ -129,8 +129,9 @@ options: termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. + - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained required: false - default: Default. Eg, when used to create a new autoscaling group, the “Default” value is used.
When used to change an existent autoscaling group, the current termination policies are mantained + default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" extends_documentation_fragment: aws diff --git a/windows/win_lineinfile.py b/windows/win_lineinfile.py index 6c54fd2bea8..3efc87cdf70 100644 --- a/windows/win_lineinfile.py +++ b/windows/win_lineinfile.py @@ -19,13 +19,11 @@ DOCUMENTATION = """ --- module: win_lineinfile -author: Brian Lloyd (brian.d.lloyd@gmail.com) -short_description: Ensure a particular line is in a file, or replace an - existing line using a back-referenced regular expression. +author: "Brian Lloyd " +short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: - This module will search a file for a line, and ensure that it is present or absent. - - This is primarily useful when you want to change a single line in - a file only. + - This is primarily useful when you want to change a single line in a file only. version_added: "2.0" options: dest: @@ -36,11 +34,7 @@ options: regexp: required: false description: - - The regular expression to look for in every line of the file. For - C(state=present), the pattern to replace if found; only the last line - found will be replaced. For C(state=absent), the pattern of the line - to remove. Uses .NET compatible regular expressions; see - U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx). + - "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)." 
state: required: false choices: [ present, absent ] @@ -50,92 +44,62 @@ options: line: required: false description: - - Required for C(state=present). The line to insert/replace into the - file. If C(backrefs) is set, may contain backreferences that will get - expanded with the C(regexp) capture groups if the regexp matches. + - Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get expanded with the C(regexp) capture groups if the regexp matches. backrefs: required: false default: "no" choices: [ "yes", "no" ] description: - - Used with C(state=present). If set, line can contain backreferences - (both positional and named) that will get populated if the C(regexp) - matches. This flag changes the operation of the module slightly; - C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) - doesn't match anywhere in the file, the file will be left unchanged. - If the C(regexp) does match, the last matching line will be replaced by - the expanded line parameter. + - Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp) matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) doesn't match anywhere in the file, the file will be left unchanged. + - If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter. insertafter: required: false default: EOF description: - - Used with C(state=present). If specified, the line will be inserted - after the last match of specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. - If specified regular expresion has no matches, EOF will be used instead. - May not be used with C(backrefs). + - Used with C(state=present). 
If specified, the line will be inserted after the last match of specified regular expression. A special value is available; C(EOF) for inserting the line at the end of the file. + - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). choices: [ 'EOF', '*regex*' ] insertbefore: required: false - version_added: "1.1" description: - - Used with C(state=present). If specified, the line will be inserted - before the last match of specified regular expression. A value is - available; C(BOF) for inserting the line at the beginning of the file. - If specified regular expresion has no matches, the line will be - inserted at the end of the file. May not be used with C(backrefs). + - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file. + - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs). choices: [ 'BOF', '*regex*' ] create: required: false choices: [ "yes", "no" ] default: "no" description: - - Used with C(state=present). If specified, the file will be created - if it does not already exist. By default it will fail if the file - is missing. + - Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing. backup: required: false default: "no" choices: [ "yes", "no" ] description: - - Create a backup file including the timestamp information so you can - get the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. validate: required: false description: - - validation to run before copying into place. - Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like - expansion and pipes won't work. + - Validation to run before copying into place. Use %s in the command to indicate the current file to validate. + - The command is passed securely so shell features like expansion and pipes won't work. required: false default: None encoding: required: false default: "auto" description: - - Specifies the encoding of the source text file to operate on (and thus what the - output encoding will be). The default of C(auto) will cause the module to auto-detect - the encoding of the source file and ensure that the modified file is written with the - same encoding. - An explicit encoding can be passed as a string that is a valid value to pass to - the .NET framework System.Text.Encoding.GetEncoding() method - see - U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx). - This is mostly useful with C(create=yes) if you want to create a new file with a specific - encoding. If C(create=yes) is specified without a specific encoding, the default encoding - (UTF-8, no BOM) will be used. + - Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding. + - "An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx)." + - This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a specific encoding, the default encoding (UTF-8, no BOM) will be used. newline: required: false description: - - Specifies the line separator style to use for the modified file. This defaults - to the windows line separator (\r\n). 
Note that the indicated line separator - will be used for file output regardless of the original line seperator that - appears in the input file. + - Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). Note that the indicated line separator will be used for file output regardless of the original line seperator that appears in the input file. choices: [ "windows", "unix" ] default: "windows" - """ -EXAMPLES = r""" +EXAMPLES = """ - win_lineinfile: dest=C:\\temp\\example.conf regexp=^name= line="name=JohnDoe" - win_lineinfile: dest=C:\\temp\\example.conf state=absent regexp="^name=" From 14ebc19897a259cd8de43e5b4292aa37878a1249 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 10 Sep 2015 16:18:35 -0400 Subject: [PATCH 386/386] fixed doc issues with win_lineinfile --- windows/win_lineinfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_lineinfile.py b/windows/win_lineinfile.py index 3efc87cdf70..24d0afdef7b 100644 --- a/windows/win_lineinfile.py +++ b/windows/win_lineinfile.py @@ -94,7 +94,7 @@ options: newline: required: false description: - - Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). Note that the indicated line separator will be used for file output regardless of the original line seperator that appears in the input file. + - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). Note that the indicated line separator will be used for file output regardless of the original line seperator that appears in the input file." choices: [ "windows", "unix" ] default: "windows" """