From 9ca27eedccfaf01f1f348362fd973bfe93c7a540 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Sun, 16 Nov 2014 01:13:29 +0100 Subject: [PATCH 001/114] apt_repository: fix file mode 'set_mode_if_different' method should be called on dest filename and after 'atomic_move' method --- packaging/os/apt_repository.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 2ee5819fc4e..5f252371e4a 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -238,10 +238,6 @@ class SourcesList(object): d, fn = os.path.split(filename) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(tmp_path, this_mode, False) - f = os.fdopen(fd, 'w') for n, valid, enabled, source, comment in sources: chunks = [] @@ -259,6 +255,10 @@ class SourcesList(object): except IOError, err: module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) module.atomic_move(tmp_path, filename) + + # allow the user to override the default mode + this_mode = module.params['mode'] + module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): From d7db09a0eb9d02adaabb0af7e167b9bdb9354644 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sat, 22 Nov 2014 20:59:36 -0500 Subject: [PATCH 002/114] django_manage: expand ~ in app_path parameter Allow users to specify app_path parameters that contain ~, for example: app_path=~/myproject --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 580cc63c2dd..424bf6821e4 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -218,7 +218,7 @@ def main(): ) command = 
module.params['command'] - app_path = module.params['app_path'] + app_path = os.path.expanduser(module.params['app_path']) virtualenv = module.params['virtualenv'] for param in specific_params: From 102167f22ecc8dd2dd4c0fed919f02579de18f17 Mon Sep 17 00:00:00 2001 From: Jim Patterson Date: Sun, 30 Nov 2014 19:31:09 -0500 Subject: [PATCH 003/114] Correct check mode for pip in virtualenv. Fix #412. Check mode was always returning changed=True for pip when the target was in a virtualenv. The code now uses the normal tests for determining if change status. --- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..3ba93185a31 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -314,7 +314,7 @@ def main(): this_dir = os.path.join(this_dir, chdir) if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: + if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): From cda40bc33c0da4444bd83ba527b198545ff99085 Mon Sep 17 00:00:00 2001 From: Sebastian Gerhards Date: Tue, 2 Dec 2014 11:33:10 +0100 Subject: [PATCH 004/114] rhn_register: add support for profilename --- packaging/os/rhn_register.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 1e92405c827..4207acc8c28 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -56,6 +56,11 @@ options: - supply an activation key for use with registration required: False default: null + profilename: + description: + - supply an profilename for use with registration + required: False + default: null channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon 
successful registration. @@ -73,6 +78,9 @@ EXAMPLES = ''' # Register with activationkey (1-222333444) and enable extended update support. - rhn_register: state=present activationkey=1-222333444 enable_eus=true +# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname. +- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom + # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). - rhn_register: > @@ -209,7 +217,7 @@ class Rhn(RegistrationBase): self.update_plugin_conf('rhnplugin', True) self.update_plugin_conf('subscription-manager', False) - def register(self, enable_eus=False, activationkey=None): + def register(self, enable_eus=False, activationkey=None, profilename=None): ''' Register system to RHN. If enable_eus=True, extended update support will be requested. @@ -221,7 +229,8 @@ class Rhn(RegistrationBase): register_cmd += " --use-eus-channel" if activationkey is not None: register_cmd += " --activationkey '%s'" % activationkey - # FIXME - support --profilename + if profilename is not None: + register_cmd += " --profilename '%s'" % profilename # FIXME - support --systemorgid rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) @@ -285,6 +294,7 @@ def main(): password = dict(default=None, required=False), server_url = dict(default=rhn.config.get_option('serverURL'), required=False), activationkey = dict(default=None, required=False), + profilename = dict(default=None, required=False), enable_eus = dict(default=False, type='bool'), channels = dict(default=[], type='list'), ) @@ -295,6 +305,7 @@ def main(): rhn.password = module.params['password'] rhn.configure(module.params['server_url']) activationkey = module.params['activationkey'] + profilename = module.params['profilename'] channels = module.params['channels'] rhn.module = module From 18183caf8616967e2a6ee6f10ca679b364a2f6ea Mon 
Sep 17 00:00:00 2001 From: Alex King Date: Mon, 8 Dec 2014 00:01:55 +1300 Subject: [PATCH 005/114] Extend hashes that can be specified by crypt_scheme beyond those understood by Apache/Nginx. --- web_infrastructure/htpasswd.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..e263f842fa0 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -46,7 +46,10 @@ options: choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] default: "apr_md5_crypt" description: - - Encryption scheme to be used. + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx state: required: false choices: [ present, absent ] @@ -74,6 +77,8 @@ EXAMPLES = """ - htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 # Remove a user from a password file - htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt """ @@ -81,13 +86,15 @@ import os from distutils.version import StrictVersion try: - from passlib.apache import HtpasswdFile + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext import passlib except ImportError: passlib_installed = False else: passlib_installed = True +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] def create_missing_directories(dest): destpath = os.path.dirname(dest) @@ -99,6 +106,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode): """ Ensures user is present Returns (msg, changed) 
""" + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes) if not os.path.exists(dest): if not create: raise ValueError('Destination %s does not exist' % dest) @@ -106,9 +117,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Create %s" % dest, True) create_missing_directories(dest) if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) if getattr(ht, 'set_password', None): ht.set_password(username, password) else: @@ -117,9 +128,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Created %s and added %s" % (dest, username), True) else: if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, default=crypt_scheme) + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) found = None if getattr(ht, 'check_password', None): From 3fca5e587694989cf74808d49341b83b487a782b Mon Sep 17 00:00:00 2001 From: "Federico G. 
Schwindt" Date: Sun, 14 Dec 2014 22:53:21 +0000 Subject: [PATCH 006/114] Allow globbing in creates= and removes= directives Fixes 1904 --- commands/command.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..bc286d6855d 100644 --- a/commands/command.py +++ b/commands/command.py @@ -20,6 +20,7 @@ import sys import datetime +import glob import traceback import re import shlex @@ -161,7 +162,7 @@ def main(): # and the filename already exists. This allows idempotence # of command executions. v = os.path.expanduser(creates) - if os.path.exists(v): + if glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s exists" % v, @@ -175,7 +176,7 @@ def main(): # and the filename does not exist. This allows idempotence # of command executions. v = os.path.expanduser(removes) - if not os.path.exists(v): + if not glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s does not exist" % v, From 8f3b5c640b98ba9473a0df7ddc650539a3efc048 Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Sun, 21 Dec 2014 16:10:39 +0100 Subject: [PATCH 007/114] Fix bind-volumes on docker >= 1.4.0 If bind-volumes are submitted to docker >= 1.4.0 with the volumes set in addition to the binds, docker will create a regular volume and not bind-mount the specified path. --- cloud/docker/docker.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f53819f2679..bbcb73df99b 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -484,20 +484,22 @@ class DockerManager(object): vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") - # host mount (e.g. 
/mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. /www, mounts a docker volume /www on the container at the same location) - else: + # regular volume + if len(parts) == 1: self.volumes[parts[0]] = {} + # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + else: + self.module.fail_json(msg='volumes support 1 to 3 arguments') self.lxc_conf = None if self.module.params.get('lxc_conf'): From d9360a7613318a593d4ed5688269979dc60c7d72 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 26 Dec 2014 01:29:54 -0500 Subject: [PATCH 008/114] Update docs, add example of using django_manage to run other commands. --- web_infrastructure/django_manage.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 3e34a6388c0..d71001fd8c2 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -30,7 +30,8 @@ options: command: choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] description: - - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. 
Other commands can be entered, but will fail if they're unknown to Django. + - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. + - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag. required: true app_path: description: @@ -102,7 +103,7 @@ EXAMPLES = """ # Load the initial_data fixture into the application - django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} -#Run syncdb on the application +# Run syncdb on the application - django_manage: > command=syncdb app_path={{ django_dir }} @@ -110,8 +111,11 @@ EXAMPLES = """ pythonpath={{ settings_dir }} virtualenv={{ virtualenv_dir }} -#Run the SmokeTest test case from the main app. Useful for testing deploys. -- django_manage: command=test app_path=django_dir apps=main.SmokeTest +# Run the SmokeTest test case from the main app. Useful for testing deploys. +- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest + +# Create an initial superuser. 
+- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }} """ From aa99eade7e45b6995c5cbc364cb45ff9cdbe2598 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sat, 27 Dec 2014 20:08:25 -0500 Subject: [PATCH 009/114] ansible-modules-core #530 fix - Mount module does not accept spaces in mount point path --- system/mount.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/system/mount.py b/system/mount.py index 9dc6fbe7b8c..0d78d6791f1 100644 --- a/system/mount.py +++ b/system/mount.py @@ -114,6 +114,11 @@ def set_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' to_write = [] @@ -158,7 +163,8 @@ def set_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # mount function needs origname + return (origname, changed) def unset_mount(**kwargs): @@ -173,6 +179,11 @@ def unset_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + to_write = [] changed = False for line in open(args['fstab'], 'r').readlines(): @@ -201,7 +212,8 @@ def unset_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # umount needs origname + return (origname, changed) def mount(module, **kwargs): From 9e381264ae599788f77a629ce3ffc7d24cf7c20a Mon Sep 17 00:00:00 2001 From: "Federico G. 
Schwindt" Date: Wed, 7 Jan 2015 04:47:58 +0000 Subject: [PATCH 010/114] Document globbing support --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index bc286d6855d..f9d2b18c921 100644 --- a/commands/command.py +++ b/commands/command.py @@ -47,12 +47,12 @@ options: aliases: [] creates: description: - - a filename, when it already exists, this step will B(not) be run. + - a filename or glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename, when it does not exist, this step will B(not) be run. + - a filename or glob pattern, when it does not exist, this step will B(not) be run. version_added: "0.8" required: no default: null From a8b8128aac9f51241c9a3da74ee28aa59c9ead13 Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Thu, 8 Jan 2015 02:06:47 +0000 Subject: [PATCH 011/114] Remove skipped=True when using creates and removes Based on #8645 --- network/basics/uri.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index aac724a8f13..9be0a06cdce 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -381,7 +381,7 @@ def main(): # of uri executions. creates = os.path.expanduser(creates) if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if the line contains removes=filename @@ -389,7 +389,7 @@ def main(): # of uri executions. 
v = os.path.expanduser(removes) if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # httplib2 only sends authentication after the server asks for it with a 401. From a935baf7dd24f1f4dd95ca39b8cbbd1c3f17ac66 Mon Sep 17 00:00:00 2001 From: Annika Backstrom Date: Thu, 22 Jan 2015 10:51:09 -0500 Subject: [PATCH 012/114] Force redownload if sha256sum does not match --- network/basics/get_url.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index b0d27859420..1fdb90a9da9 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -217,8 +217,29 @@ def main(): dest_is_dir = os.path.isdir(dest) last_mod_time = None + # Remove any non-alphanumeric characters, including the infamous + # Unicode zero-width space + stripped_sha256sum = re.sub(r'\W+', '', sha256sum) + + # Fail early if sha256 is not supported + if sha256sum != '' and not HAS_HASHLIB: + module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") + if not dest_is_dir and os.path.exists(dest): - if not force: + checksum_mismatch = False + + # If the download is not forced and there is a checksum, allow + # checksum match to skip the download. 
+ if not force and sha256sum != '': + destination_checksum = module.sha256(dest) + + if stripped_sha256sum.lower() == destination_checksum: + module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) + + checksum_mismatch = True + + # Not forcing redownload, unless sha256sum has already failed + if not force and not checksum_mismatch: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) # If the file already exists, prepare the last modified time for the @@ -281,15 +302,7 @@ def main(): # Check the digest of the destination file and ensure that it matches the # sha256sum parameter if it is present if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) + destination_checksum = module.sha256(dest) if stripped_sha256sum.lower() != destination_checksum: os.remove(dest) From 6d6e948f1e869811f99eb9b3402234ccf3f24716 Mon Sep 17 00:00:00 2001 From: Alexis Seigneurin Date: Mon, 2 Feb 2015 14:51:04 +0100 Subject: [PATCH 013/114] - 'name' should not be required so as to allow uninstalling a cron_file --- system/cron.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/cron.py b/system/cron.py index c0a39b61c61..ed62674c01c 100644 --- a/system/cron.py +++ b/system/cron.py @@ -46,7 +46,7 @@ options: description: - Description of a crontab entry. default: null - required: true + required: false user: description: - The specific user whose crontab should be modified. 
@@ -397,7 +397,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name=dict(required=True), + name=dict(required=False), user=dict(required=False), job=dict(required=False), cron_file=dict(required=False), From 80da041eb61c8397b21f0e06d26c7b2c58745417 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Fri, 20 Feb 2015 17:22:03 +0000 Subject: [PATCH 014/114] - List the name servers of a zone. --- cloud/amazon/route53.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 7fbe8552f41..9454a7ba81b 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -248,7 +248,13 @@ def main(): module.exit_json(changed=False) if command_in == 'get': - module.exit_json(changed=False, set=record) + if type_in == 'NS': + ns = record['values'] + else: + # Retrieve name servers associated to the zone. + ns = conn.get_zone(zone_in).get_nameservers() + + module.exit_json(changed=False, set=record, nameservers=ns) if command_in == 'delete' and not found_record: module.exit_json(changed=False) From d9f8fa56d8af9de402ee9f48ea832709b20754a8 Mon Sep 17 00:00:00 2001 From: gimoh Date: Mon, 23 Feb 2015 14:14:00 +0000 Subject: [PATCH 015/114] Do not insert extra newline if line already contains it When using YAML multi-line strings, e.g.: - lineinfile: dest: /tmp/foo line: > foo bar the line already ends with a newline. If an extra newline is appended unconditionally it will lead to inserting an extra newline on each run. --- files/lineinfile.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index b9fc628e10c..480811dbbfa 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -242,8 +242,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, # Don't do backref expansion if not asked. 
new_line = line - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep + if not new_line.endswith(os.linesep): + new_line += os.linesep + + if lines[index[0]] != new_line: + lines[index[0]] = new_line msg = 'line replaced' changed = True elif backrefs: From c3f92cca210db1f7042bfce1ff90645255f0b49e Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Thu, 12 Mar 2015 12:55:14 +0100 Subject: [PATCH 016/114] docker: Use a list instead of a dict for volumes according to the docker-py docs. Do not add host-binds to volumes list. --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 6e571a7ba5d..fcc14b5a5e0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -474,13 +474,13 @@ class DockerManager(object): self.volumes = None if self.module.params.get('volumes'): self.binds = {} - self.volumes = {} + self.volumes = [] vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") # regular volume if len(parts) == 1: - self.volumes[parts[0]] = {} + self.volumes.append(parts[0]) # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) elif 2 <= len(parts) <= 3: # default to read-write From 83c6cd04f48c6388a075af5d9a709667b8f007b9 Mon Sep 17 00:00:00 2001 From: Tagir Bakirov Date: Fri, 13 Mar 2015 11:07:13 +0100 Subject: [PATCH 017/114] added 'absent' option to supervisorctl --- web_infrastructure/supervisorctl.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..c3b52d0a79d 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -64,7 +64,7 @@ options: - The desired state of program/group. 
required: true default: null - choices: [ "present", "started", "stopped", "restarted" ] + choices: [ "present", "started", "stopped", "restarted", "absent" ] supervisorctl_path: description: - path to supervisorctl executable @@ -101,7 +101,7 @@ def main(): username=dict(required=False), password=dict(required=False), supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent']) ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) @@ -196,6 +196,19 @@ def main(): processes = get_matched_processes() + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + if state == 'present': if len(processes) > 0: module.exit_json(changed=False, name=name, state=state) From bdc28a6bb351688f54ffab8ca6ca7d572e4f8f67 Mon Sep 17 00:00:00 2001 From: Ian Clegg Date: Fri, 20 Mar 2015 10:34:36 +0000 Subject: [PATCH 018/114] Added support for comma seperated feature names in the name parameter of the win_feature module --- windows/win_feature.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index a54007b47bf..458d942e328 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -28,7 +28,7 @@ $result = New-Object PSObject -Property @{ } If ($params.name) { - $name = $params.name + $name = $params.name -split ',' | % { $_.Trim() } } Else { Fail-Json $result "mising required argument: name" From b28459e6f5f5053d7ed0282aa061d994a95feb40 Mon Sep 17 00:00:00 2001 From: 
Ash Wilson Date: Mon, 30 Mar 2015 15:52:17 -0400 Subject: [PATCH 019/114] Wait for process exit on detached=no. --- cloud/docker/docker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 7eea7888059..2f5cb8690d9 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -174,7 +174,8 @@ options: default: null detach: description: - - Enable detached mode to leave the container running in background. + - Enable detached mode to leave the container running in background. If + disabled, fail unless the process exits cleanly. default: true state: description: @@ -1258,6 +1259,13 @@ class DockerManager(object): self.client.start(i['Id'], **params) self.increment_counter('started') + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + def stop_containers(self, containers): for i in containers: self.client.stop(i['Id']) From c5f5dfd8a0b73a384a6970113a1012a05bf5276d Mon Sep 17 00:00:00 2001 From: Dan Abel Date: Fri, 31 Oct 2014 14:46:32 +0000 Subject: [PATCH 020/114] use aws connect calls that allow boto profile use --- cloud/amazon/s3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index e7d017f58ea..2ca36a85bdc 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -116,6 +116,7 @@ from boto.s3.connection import OrdinaryCallingFormat try: import boto + import boto.ec2 from boto.s3.connection import Location from boto.s3.connection import S3Connection except ImportError: From f38186ce8b49ea98e29241712da45917a3154e73 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Fri, 3 Apr 2015 12:41:10 -0700 Subject: [PATCH 021/114] ansible_facts reflects 1.8 output --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/cloud/docker/docker.py b/cloud/docker/docker.py index 49e11f3caa0..6cb9802410e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1504,7 +1504,7 @@ def main(): summary=manager.counters, containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), - ansible_facts=_ansible_facts(containers.changed)) + ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed))) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) From 3b954edab2bf54c710b86d95482548b893d648fa Mon Sep 17 00:00:00 2001 From: Lucas David Traverso Date: Sun, 19 Apr 2015 04:39:59 -0300 Subject: [PATCH 022/114] django_manage: Use shebang in manage.py instead of hardcode python --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..c721456715f 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -234,7 +234,7 @@ def main(): _ensure_virtualenv(module) - cmd = "python manage.py %s" % (command, ) + cmd = "./manage.py %s" % (command, ) if command in noinput_commands: cmd = '%s --noinput' % cmd From fff29f049e1f7b2103f4527ae440c92950ade6b0 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 8 May 2015 01:40:10 +0900 Subject: [PATCH 023/114] Not use "is" to compare strings As "is" tests whether if operands are the same object rather than they have the same value, potentially causes a wrong result. 
--- system/service.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/service.py b/system/service.py index 3589340f152..5627f128c92 100644 --- a/system/service.py +++ b/system/service.py @@ -862,7 +862,7 @@ class LinuxService(Service): if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: self.execute_command("%s zap" % svc_cmd, daemonize=True) - if self.action is not "restart": + if self.action != "restart": if svc_cmd != '': # upstart or systemd or OpenRC rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) @@ -970,11 +970,11 @@ class FreeBsdService(Service): def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" - if self.action is "reload": + if self.action == "reload": self.action = "onereload" return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) @@ -1180,9 +1180,9 @@ class NetBsdService(Service): self.running = True def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" self.svc_cmd = "%s" % self.svc_initscript From a0b57f3aab09c16b7b16a2e908c4067bfb194e8e Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 15:28:28 +0200 Subject: [PATCH 024/114] GCE module: add posibility to specify Service Account permissions during instance creation --- cloud/google/gce.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..20ceb257b3a 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -287,6 +287,8 @@ def create_instances(module, gce, instance_names): ip_forward = module.params.get('ip_forward') 
external_ip = module.params.get('external_ip') disk_auto_delete = module.params.get('disk_auto_delete') + service_account_permissions = module.params.get('service_account_permissions') + service_account_email = module.params.get('service_account_email') if external_ip == "none": external_ip = None @@ -330,6 +332,14 @@ def create_instances(module, gce, instance_names): items.append({"key": k,"value": v}) metadata = {'items': items} + ex_sa_perms = [] + if service_account_permissions: + if service_account_email: + ex_sa_perms.append({'email': service_account_email}) + else: + ex_sa_perms.append({'email': "default"}) + ex_sa_perms[0]['scopes'] = service_account_permissions + # These variables all have default values but check just in case if not lc_image or not lc_network or not lc_machine_type or not lc_zone: module.fail_json(msg='Missing required create instance variable', @@ -349,7 +359,7 @@ def create_instances(module, gce, instance_names): inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -437,6 +447,7 @@ def main(): tags = dict(type='list'), zone = dict(default='us-central1-a'), service_account_email = dict(), + service_account_permissions = dict(type='list'), pem_file = dict(), project_id = dict(), ip_forward = dict(type='bool', default=False), From f714cc5f7ecb7d8f8bf994276292db6e72caa0a2 Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 15:34:36 +0200 Subject: [PATCH 025/114] GCE module: document Service Account permissions parameter usage --- cloud/google/gce.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 
20ceb257b3a..261f6d32297 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -58,6 +58,13 @@ options: required: false default: null aliases: [] + service_account_permissions: + version_added: 1.5.1 + description: + - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) + required: false + default: null + aliases: [] pem_file: version_added: 1.5.1 description: From fa9727eb99fdd0c38ed7f3ba72cdf31c69e82a61 Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 16:00:24 +0200 Subject: [PATCH 026/114] GCE module: added Service Account permissions sanity checks --- cloud/google/gce.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 261f6d32297..b288d9dfb43 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -340,7 +340,13 @@ def create_instances(module, gce, instance_names): metadata = {'items': items} ex_sa_perms = [] + bad_perms = [] if service_account_permissions: + for perm in service_account_permissions: + if not perm in gce.SA_SCOPES_MAP.keys(): + bad_perms.append(perm) + if len(bad_perms) > 0: + module.fail_json(msg='bad permissions: %s' % str(bad_perms)) if service_account_email: ex_sa_perms.append({'email': service_account_email}) else: From 3f679933a6695f91d24d0ed02b52f9caab2d4e5d Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Fri, 22 May 2015 16:36:38 -0600 Subject: [PATCH 027/114] Add support for --log-driver option that docker released with Docker 1.6 --- cloud/docker/docker.py | 116 +++++++++++++++++++++++++++-------------- 1 file changed, 76 insertions(+), 40 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..e4c27797b71 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -92,6 +92,21 @@ options: - 'alias. Use docker CLI-style syntax: C(redis:myredis).' 
default: null version_added: "1.5" + log_driver: + description: + - You can specify a different logging driver for the container than for the daemon. + "json-file" Default logging driver for Docker. Writes JSON messages to file. + docker logs command is available only for this logging driver. + "none" disables any logging for the container. docker logs won't be available with this driver. + "syslog" Syslog logging driver for Docker. Writes log messages to syslog. + docker logs command is not available for this logging driver. + Requires docker >= 1.6.0. + required: false + default: json-file + choices: + - json-file + - none + - syslog memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable @@ -506,6 +521,7 @@ class DockerManager(object): 'restart_policy': ((0, 5, 0), '1.14'), 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), + 'log_driver': ((1, 2, 0), '1.18'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1110,6 +1126,15 @@ class DockerManager(object): self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) differing.append(container) + # LOG_DRIVER + + expected_log_driver = set(self.module.params.get('log_driver') or []) + actual_log_driver = set(container['HostConfig']['LogConfig'] or []) + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue + return differing def get_deployed_containers(self): @@ -1206,44 +1231,7 @@ class DockerManager(object): except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) - def create_containers(self, count=1): - try: - mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - - params = {'image': self.module.params.get('image'), - 
'command': self.module.params.get('command'), - 'ports': self.exposed_ports, - 'volumes': self.volumes, - 'mem_limit': mem_limit, - 'environment': self.env, - 'hostname': self.module.params.get('hostname'), - 'domainname': self.module.params.get('domainname'), - 'detach': self.module.params.get('detach'), - 'name': self.module.params.get('name'), - 'stdin_open': self.module.params.get('stdin_open'), - 'tty': self.module.params.get('tty'), - } - - def do_create(count, params): - results = [] - for _ in range(count): - result = self.client.create_container(**params) - self.increment_counter('created') - results.append(result) - - return results - - try: - containers = do_create(count, params) - except: - self.pull_image() - containers = do_create(count, params) - - return containers - - def start_containers(self, containers): + def create_host_config(self): params = { 'lxc_conf': self.lxc_conf, 'binds': self.binds, @@ -1256,7 +1244,7 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -1281,8 +1269,55 @@ class DockerManager(object): self.ensure_capability('extra_hosts') params['extra_hosts'] = optionals['extra_hosts'] + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + return docker.utils.create_host_config(**params) + + def create_containers(self, count=1): + try: + mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + + params = {'image': self.module.params.get('image'), + 'command': self.module.params.get('command'), + 'ports': 
self.exposed_ports, + 'volumes': self.volumes, + 'mem_limit': mem_limit, + 'environment': self.env, + 'hostname': self.module.params.get('hostname'), + 'domainname': self.module.params.get('domainname'), + 'detach': self.module.params.get('detach'), + 'name': self.module.params.get('name'), + 'stdin_open': self.module.params.get('stdin_open'), + 'tty': self.module.params.get('tty'), + 'host_config': self.create_host_config(), + } + + def do_create(count, params): + results = [] + for _ in range(count): + result = self.client.create_container(**params) + self.increment_counter('created') + results.append(result) + + return results + + try: + containers = do_create(count, params) + except: + self.pull_image() + containers = do_create(count, params) + + return containers + + def start_containers(self, containers): for i in containers: - self.client.start(i['Id'], **params) + self.client.start(i) self.increment_counter('started') def stop_containers(self, containers): @@ -1475,6 +1510,7 @@ def main(): net = dict(default=None), pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), + log_driver = dict(default='json-file', choices=['json-file', 'none', 'syslog']), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 41049042de1fc302f05d668b58617a6d2b26a2ea Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 22 May 2015 18:57:06 -0400 Subject: [PATCH 028/114] remove blank lines from htpasswd file used standard mktemp() --- web_infrastructure/htpasswd.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..03cd6a5b253 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -198,6 +198,30 @@ def main(): if not passlib_installed: module.fail_json(msg="This module requires the passlib Python library") + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
+ f = open(path, "r") + try: + lines=f.readlines() + finally: + f.close + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path,"w") + try: + [f.write(line) for line in lines if line.strip() ] + finally: + f.close + try: if state == 'present': (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) From 5f1d88a8299ca11bc9d1cf64f22eaa03fd8e4565 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Wed, 27 May 2015 12:33:11 -0600 Subject: [PATCH 029/114] Update docker module to look at log_driver variable when deciding if container configuration has changed. --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e4c27797b71..d765ce00c66 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,7 +1129,7 @@ class DockerManager(object): # LOG_DRIVER expected_log_driver = set(self.module.params.get('log_driver') or []) - actual_log_driver = set(container['HostConfig']['LogConfig'] or []) + actual_log_driver = set(container['HostConfig']['LogConfig']['Type'] or []) if actual_log_driver != expected_log_driver: self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) differing.append(container) From c95717afe582ae889f781e44e58683b77657d1e9 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Thu, 28 May 2015 11:36:20 -0600 Subject: [PATCH 030/114] Set default "log_driver" option to None in docker module. 
--- cloud/docker/docker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index d765ce00c66..e22f8ff3edd 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -100,6 +100,7 @@ options: "none" disables any logging for the container. docker logs won't be available with this driver. "syslog" Syslog logging driver for Docker. Writes log messages to syslog. docker logs command is not available for this logging driver. + If not defined explicitly, the Docker daemon's default ("json-file") will apply. Requires docker >= 1.6.0. required: false default: json-file @@ -1510,7 +1511,7 @@ def main(): net = dict(default=None), pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), - log_driver = dict(default='json-file', choices=['json-file', 'none', 'syslog']), + log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 88eddb13c01ff6f18c86a7b391ca2478a7fa05c7 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Mon, 1 Jun 2015 09:48:24 -0600 Subject: [PATCH 031/114] Update docker module to avoid false positives when containers are first created. Also have the module check for api compatibility before trying to set a "--log-driver" option. 
--- cloud/docker/docker.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e22f8ff3edd..977969da03f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,12 +1129,13 @@ class DockerManager(object): # LOG_DRIVER - expected_log_driver = set(self.module.params.get('log_driver') or []) - actual_log_driver = set(container['HostConfig']['LogConfig']['Type'] or []) - if actual_log_driver != expected_log_driver: - self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) - differing.append(container) - continue + if self.ensure_capability('log_driver', false) : + expected_log_driver = self.module.params.get('log_driver') or 'json-file' + actual_log_driver = container['HostConfig']['LogConfig']['Type'] + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue return differing From a2630d40fb2239bf80509dc3843df9787258908e Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Tue, 2 Jun 2015 00:09:01 -0600 Subject: [PATCH 032/114] Fix typo in Docker module. 
--- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 977969da03f..c4f8e3e9f0b 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,7 +1129,7 @@ class DockerManager(object): # LOG_DRIVER - if self.ensure_capability('log_driver', false) : + if self.ensure_capability('log_driver', False) : expected_log_driver = self.module.params.get('log_driver') or 'json-file' actual_log_driver = container['HostConfig']['LogConfig']['Type'] if actual_log_driver != expected_log_driver: From 361a1e1b65e65fff29a9fb8555e7559b54545e9e Mon Sep 17 00:00:00 2001 From: Igor Khomyakov Date: Fri, 9 Jan 2015 16:57:20 +0300 Subject: [PATCH 033/114] Check if a service exists --- web_infrastructure/supervisorctl.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..f0cfa691c27 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -30,7 +30,7 @@ version_added: "0.7" options: name: description: - - The name of the supervisord program or group to manage. + - The name of the supervisord program or group to manage. - The name will be taken as group name when it ends with a colon I(:) - Group support is only available in Ansible version 1.6 or later. 
required: true @@ -192,9 +192,14 @@ def main(): if state == 'restarted': rc, out, err = run_supervisorctl('update') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: True, 'restart', 'started') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") if state == 'present': if len(processes) > 0: From 68cfbca624c118d85ee1ee2547755cfd356b156d Mon Sep 17 00:00:00 2001 From: toninog Date: Mon, 8 Jun 2015 14:21:49 +0100 Subject: [PATCH 034/114] Fixed more issues with the delete_group and paramater mismatch --- cloud/amazon/iam.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index d496a7a40c7..3cfca55135c 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -133,7 +133,7 @@ task: iam_type: user name: jdavila state: update - groups: "{{ item.created_group.group_name }}" + group: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' @@ -388,7 +388,7 @@ def create_group(module=None, iam=None, name=None, path=None): return name, changed -def delete_group(module, iam, name): +def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) @@ -662,7 +662,7 @@ def main(): group_exists = name in orig_group_list if state == 'present' and not group_exists: - new_group, changed = create_group(iam, name, path) + new_group, changed = create_group(iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( @@ -690,7 +690,7 @@ def main(): changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) elif state == 'absent': if name in orig_group_list: - removed_group, changed = delete_group(iam, name) + removed_group, changed = delete_group(iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") From fccc925ec51cc4c02236f1c5fbd3c1cfe94c0374 Mon Sep 17 00:00:00 2001 From: toninog Date: Tue, 9 Jun 2015 15:27:15 +0100 Subject: [PATCH 035/114] fixes to code to enable updates of user to groups and delete groups. Fixed example yaml to use groups --- cloud/amazon/iam.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index a4111ee5477..70ae9ba75a5 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -133,7 +133,7 @@ task: iam_type: user name: jdavila state: update - group: "{{ item.created_group.group_name }}" + groups: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' @@ -376,7 +376,7 @@ new_name=None): return (groups, changed) -def create_group(module, iam, name, path): +def create_group(module=None, iam=None, name=None, path=None): changed = False try: iam.create_group( @@ -388,7 +388,7 @@ def create_group(module, iam, name, path): return name, changed -def delete_group(module, iam, name): +def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) @@ -414,8 +414,7 @@ def delete_group(module, iam, name): changed = True return changed, name - -def update_group(module, iam, name, new_name, new_path): +def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): changed = False try: current_group_path = iam.get_group( @@ -663,11 +662,11 @@ def main(): group_exists = name in orig_group_list if state == 'present' and not group_exists: - new_group, changed = create_group(iam, name, path) + new_group, changed = create_group(iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) 
elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( - iam, name, new_name, new_path) + iam=iam, name=name, new_name=new_name, new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, @@ -691,7 +690,7 @@ def main(): changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name) elif state == 'absent': if name in orig_group_list: - removed_group, changed = delete_group(iam, name) + removed_group, changed = delete_group(iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") From 558f2ace1f3448dd50c17d38de9a50f5850c050a Mon Sep 17 00:00:00 2001 From: Ed Hein Date: Fri, 12 Jun 2015 12:36:52 +0200 Subject: [PATCH 036/114] Fix computation of port bindings. Port bindings configuration can be a list if several host ports are bound to the same guest port. --- cloud/docker/docker.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..b04b6ee335a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1041,15 +1041,14 @@ class DockerManager(object): for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): container_port = "{0}/tcp".format(container_port) - bind = {} if len(config) == 1: - bind['HostIp'] = "0.0.0.0" - bind['HostPort'] = "" + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) else: - bind['HostIp'] = config[0] - bind['HostPort'] = str(config[1]) - - expected_bound_ports[container_port] = [bind] + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': 
str(config[1])}] actual_bound_ports = container['HostConfig']['PortBindings'] or {} From dadc1faebd9a177f66f39830d4c65efe9d559870 Mon Sep 17 00:00:00 2001 From: Konstantin Gribov Date: Tue, 2 Jun 2015 16:14:07 +0300 Subject: [PATCH 037/114] Escape spaces, backslashes and ampersands in fstab Fixes #530. It's more generic than #578 which only fixes spaces escaping in name (target dir to mount). Escaping is used in both `set_mount` (important for `src`, `name` and `opts`) and `unset_mount` (for `name`). It's shouldn't be used in `mount` and `umount` since `name` parameter is passed as array element to `module.run_command`. Signed-off-by: Konstantin Gribov --- system/mount.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/system/mount.py b/system/mount.py index e11d497220b..d41d1f936e2 100644 --- a/system/mount.py +++ b/system/mount.py @@ -102,6 +102,10 @@ def write_fstab(lines, dest): fs_w.flush() fs_w.close() +def _escape_fstab(v): + """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ + return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') + def set_mount(**kwargs): """ set/change a mount point location in fstab """ @@ -119,6 +123,7 @@ def set_mount(**kwargs): to_write = [] exists = False changed = False + escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()]) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -135,16 +140,16 @@ def set_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_args['name']: to_write.append(line) continue # it exists - now see if what we have is different exists = True for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: + if ld[t] != escaped_args[t]: changed = True - ld[t] = args[t] + ld[t] = escaped_args[t] if changed: 
to_write.append(new_line % ld) @@ -175,6 +180,7 @@ def unset_mount(**kwargs): to_write = [] changed = False + escaped_name = _escape_fstab(args['name']) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -191,7 +197,7 @@ def unset_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_name: to_write.append(line) continue @@ -260,8 +266,6 @@ def main(): args['passno'] = module.params['passno'] if module.params['opts'] is not None: args['opts'] = module.params['opts'] - if ' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") if module.params['dump'] is not None: args['dump'] = module.params['dump'] if module.params['fstab'] is not None: From 759c2de7f98b3bf0979cafb804df982d27dcf5fd Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:56:50 -0400 Subject: [PATCH 038/114] Add filter ability --- cloud/openstack/os_client_config.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 100608b0fd0..a12cd8fe65a 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -25,6 +25,15 @@ short_description: Get OpenStack Client config description: - Get I(openstack) client config data from clouds.yaml or environment version_added: "2.0" +notes: + - Facts are placed in the C(openstack.clouds) variable. +options: + clouds: + description: + - List of clouds to limit the return list to. 
No value means return + information on all configured clouds + required: false + default: [] requirements: [ os-client-config ] author: "Monty Taylor (@emonty)" ''' @@ -34,19 +43,27 @@ EXAMPLES = ''' - os-client-config: - debug: var={{ item }} with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" + +# Get the information back just about the mordred cloud +- os-client-config: + clouds: + - mordred ''' def main(): - module = AnsibleModule({}) + module = AnsibleModule({ + clouds=dict(required=False, default=[]), + }) p = module.params try: config = os_client_config.OpenStackConfig() clouds = [] for cloud in config.get_all_clouds(): - cloud.config['name'] = cloud.name - clouds.append(cloud.config) + if not module.params['clouds'] or cloud.name in module.param['clouds']: + cloud.config['name'] = cloud.name + clouds.append(cloud.config) module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) except exceptions.OpenStackConfigException as e: module.fail_json(msg=str(e)) From a226701efe836e3c288a1624dfd820928dcd0c16 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 039/114] Add OpenStack Keypair module Also deprecate old nova_keypair module. --- .../{nova_keypair.py => _nova_keypair.py} | 1 + cloud/openstack/os_keypair.py | 140 ++++++++++++++++++ 2 files changed, 141 insertions(+) rename cloud/openstack/{nova_keypair.py => _nova_keypair.py} (99%) create mode 100644 cloud/openstack/os_keypair.py diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 99% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py index b2e38ff7db9..68df0c5a2c4 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -32,6 +32,7 @@ version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" +deprecated: Deprecated in 2.0. 
Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py new file mode 100644 index 00000000000..c4725552725 --- /dev/null +++ b/cloud/openstack/os_keypair.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# Copyright (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_keypair +short_description: Add/Delete a keypair from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove key pair from OpenStack +options: + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected to vm's upon creation + required: false + default: None + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive with public_key + required: false + default: None + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Creates a key pair with the running users public key +- os_keypair: + cloud: mordred + state: present + name: ansible_key + public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + +# Creates a new key pair and the private key returned after the run. +- os_keypair: + cloud: rax-dfw + state: present + name: ansible_key +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + public_key = dict(default=None), + public_key_file = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[['public_key', 'public_key_file']]) + module = AnsibleModule(argument_spec, **module_kwargs) + + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + else: + public_key = module.params['public_key'] + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + public_key = module.params['public_key'] + + try: + cloud = shade.openstack_cloud(**module.params) + + if state == 'present': + for key in cloud.list_keypairs(): + if key.name == name: + if public_key and (public_key != key.public_key): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." 
% key.name + ) + else: + module.exit_json(changed=False, result="Key present") + try: + key = cloud.create_keypair(name, public_key) + except Exception, e: + module.exit_json( + msg="Error in creating the keypair: %s" % e.message + ) + if not public_key: + module.exit_json(changed=True, key=key.private_key) + module.exit_json(changed=True, key=None) + + elif state == 'absent': + for key in cloud.list_keypairs(): + if key.name == name: + try: + cloud.delete_keypair(name) + except Exception, e: + module.fail_json( + msg="Keypair deletion has failed: %s" % e.message + ) + module.exit_json(changed=True, result="deleted") + module.exit_json(changed=False, result="not present") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 82dc5c4394ab88e055debed6b0d7d397f11638d7 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 19:30:34 +0100 Subject: [PATCH 040/114] Avoind using lookup() in documentation lookup() is currently broken (current Ansible devel branch), so better to avoid it in our examples. --- cloud/openstack/os_keypair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c4725552725..c6794b47826 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -63,7 +63,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + public_key_file: ~/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: From 02d0a73906bcd6e1c8805825a23b49df027c65a9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:59:32 -0400 Subject: [PATCH 041/114] Move the order of argument processing --- cloud/openstack/os_keypair.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c6794b47826..b404e6cc02a 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -84,18 +84,16 @@ def main(): mutually_exclusive=[['public_key', 'public_key_file']]) module = AnsibleModule(argument_spec, **module_kwargs) - if module.params['public_key_file']: - public_key = open(module.params['public_key_file']).read() - else: - public_key = module.params['public_key'] - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') - state = module.params['state'] name = module.params['name'] public_key = module.params['public_key'] + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + try: cloud = shade.openstack_cloud(**module.params) From d7f65af6d934759b1c53bbeef010d03d99da241b Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 10:45:12 -0700 Subject: [PATCH 042/114] Resolving secgroup.id issue in this module secgroup['id'] was not being returned in all cases where the specified security group exists. --- cloud/openstack/os_security_group.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 51e7df772a1..86e6de0b023 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,6 +48,8 @@ options: - Should the resource be present or absent. 
choices: [present, absent] default: present + +requirements: ["shade"] ''' EXAMPLES = ''' @@ -114,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) - changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - changed = True + module.exit_json(changed=True, id=secgroup['id']) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - changed = True - module.exit_json( - changed=changed, id=secgroup.id, secgroup=secgroup) + module.exit_json(changed=True, id=secgroup['id']) + else: + module.exit_json(changed=False, id=secgroup['id']) if state == 'absent': - if secgroup: + if not secgroup: + module.exit_json(changed=False) + else: cloud.delete_security_group(secgroup['id']) - changed=True - module.exit_json(changed=changed) + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 94a8b6dcccfcceb6cbec876ad957ed2c0a105c19 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:06:12 -0400 Subject: [PATCH 043/114] Make sure we're always returning objects too --- cloud/openstack/os_security_group.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 86e6de0b023..7fba28c8cb9 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -116,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) + changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, id=secgroup['id']) + changed = True else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - 
module.exit_json(changed=True, id=secgroup['id']) - else: - module.exit_json(changed=False, id=secgroup['id']) + changed = True + module.exit_json( + changed=changed, id=secgroup['id'], secgroup=secgroup) if state == 'absent': - if not secgroup: - module.exit_json(changed=False) - else: + if secgroup: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True) + changed = True + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 850ed126e2500265a4f43c5ee5c8aa00de39796a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:39:57 -0400 Subject: [PATCH 044/114] Remove duplicate shade requirement --- cloud/openstack/os_security_group.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 7fba28c8cb9..e42b7f938f5 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,8 +48,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - -requirements: ["shade"] ''' EXAMPLES = ''' From 9040c2f75cf3b1a36934ad7cf46a66ada211de71 Mon Sep 17 00:00:00 2001 From: murdochr Date: Sat, 20 Jun 2015 21:36:10 +0100 Subject: [PATCH 045/114] Change docs to reflect correct when syntax for matching variable strings as per MD's forum post as this fails with unhelpful error otherwise. 
https://groups.google.com/forum/#!topic/ansible-project/D2hQzZ_jNuM --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index b7fa8282c83..3de17c12d60 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -156,7 +156,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: 'AWESOME' not in "{{ webpage.content }}" + when: "'illustrative' not in webpage.content" # Create a JIRA issue From 9d833d1d4c1c12e846ae70fff50e54bd2d322e36 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Tue, 30 Sep 2014 11:13:54 +0300 Subject: [PATCH 046/114] Hostname module should update ansible_hostname --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 882402a5e21..d9193641eb2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -509,6 +509,6 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name) + module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) main() From 1cfa21829b73b3c3ecbd55e273381842e4a495cd Mon Sep 17 00:00:00 2001 From: Michal Smereczynski Date: Mon, 22 Jun 2015 13:10:09 +0200 Subject: [PATCH 047/114] Added new Premium Storage instance sizes and case-related documentation clarification. --- cloud/azure/azure.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 5469e385da1..f1eea46525e 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -53,7 +53,7 @@ options: default: null role_size: description: - - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6) + - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of type G and DS are not available in all regions (locations). 
Make sure if you selected the size and type of instance available in your chosen location. required: false default: Small endpoints: @@ -235,6 +235,14 @@ AZURE_ROLE_SIZES = ['ExtraSmall', 'Standard_D12', 'Standard_D13', 'Standard_D14', + 'Standard_DS1', + 'Standard_DS2', + 'Standard_DS3', + 'Standard_DS4', + 'Standard_DS11', + 'Standard_DS12', + 'Standard_DS13', + 'Standard_DS14', 'Standard_G1', 'Standard_G2', 'Standard_G3', From 74d9f74536e3ee21445a4e9cd4e33ae773590348 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 22 Jun 2015 14:52:45 +0100 Subject: [PATCH 048/114] Parse out space characters in route53 value list Fixes: https://github.com/ansible/ansible-modules-core/issues/992 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 67700060d9f..d25be6b99ea 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -224,7 +224,7 @@ def main(): if type(value_in) is str: if value_in: - value_list = sorted(value_in.split(',')) + value_list = sorted([s.strip() for s in value_in.split(',')]) elif type(value_in) is list: value_list = sorted(value_in) From 3b4b065315072207537d01770a79584e2a01d0a4 Mon Sep 17 00:00:00 2001 From: Bryan Fleming Date: Wed, 6 May 2015 16:44:40 -0500 Subject: [PATCH 049/114] fixes #1120 - privileges using columns --- database/mysql/mysql_user.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 763e0e7ebd5..0ff290f1185 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -157,6 +157,7 @@ password=n<_665{vS43y import getpass import tempfile +import re try: import MySQLdb except ImportError: @@ -316,13 +317,19 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. 
""" output = {} + privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') dbpriv = pieces[0].rsplit(".", 1) pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) - - output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')] - new_privs = frozenset(output[pieces[0]]) + if '(' in pieces[1]: + output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) + for i in output[pieces[0]]: + privs.append(re.sub(r'\(.*\)','',i)) + else: + output[pieces[0]] = pieces[1].upper().split(',') + privs = output[pieces[0]] + new_privs = frozenset(privs) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) From 2f8300087e54f57cf3482cb75ce7633b805d9fbb Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Tue, 23 Jun 2015 07:14:30 +0200 Subject: [PATCH 050/114] Added "EC2 instance" termination_protection and source_dest_check changeability at run-time --- cloud/amazon/ec2.py | 53 +++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 20d49ce5995..dc7d5d38dd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -190,6 +190,13 @@ options: required: false default: yes choices: [ "yes", "no" ] + termination_protection: + version_added: "2.0" + description: + - Enable or Disable the Termination Protection + required: false + default: no + choices: [ "yes", "no" ] state: version_added: "1.3" description: @@ -786,6 +793,7 @@ def create_instances(module, ec2, vpc, override_count=None): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) + termination_protection = module.boolean(module.params.get('termination_protection')) # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1014,11 +1022,16 @@ def 
create_instances(module, ec2, vpc, override_count=None): for res in res_list: running_instances.extend(res.instances) - # Enabled by default by Amazon - if not source_dest_check: + # Enabled by default by AWS + if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) + # Disabled by default by AWS + if termination_protection is True: + for inst in res.instances: + inst.modify_attribute('disableApiTermination', True) + # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: @@ -1135,21 +1148,32 @@ def startstop_instances(module, ec2, instance_ids, state): if not isinstance(instance_ids, list) or len(instance_ids) < 1: module.fail_json(msg='instance_ids should be a list of instances, aborting') - # Check that our instances are not in the state we want to take them to - # and change them to our desired state + # Check (and eventually change) instances attributes and instances state running_instances_array = [] for res in ec2.get_all_instances(instance_ids): for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True + + # Check "source_dest_check" attribute + if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + inst.modify_attribute('sourceDestCheck', source_dest_check) + changed = True + + # Check "termination_protection" attribute + if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection: + inst.modify_attribute('disableApiTermination', termination_protection) + changed = True + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + 
inst.stop() + except EC2ResponseError, e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True ## Wait for all the instances to finish starting or stopping wait_timeout = time.time() + wait_timeout @@ -1200,6 +1224,7 @@ def main(): instance_profile_name = dict(), instance_ids = dict(type='list', aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), + termination_protection = dict(type='bool', default=False), state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']), exact_count = dict(type='int', default=None), count_tag = dict(), From fe4884e8f09b216f298b4fefdc26084a8be8930f Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 22 Jun 2015 17:13:42 +0200 Subject: [PATCH 051/114] Added some block_device_mapping (disks) informations to EC2 instance module ouput --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index dc7d5d38dd3..ad2f8f8f71b 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -619,6 +619,19 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + bdm_dict = {} + bdm = getattr(inst, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + instance_info['block_device_mapping'] = bdm_dict + except AttributeError: + instance_info['block_device_mapping'] = False + try: instance_info['tenancy'] = getattr(inst, 'placement_tenancy') except AttributeError: From a1538b490ed71fc291035daa4aaf184369e3fa86 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 08:57:06 -0700 Subject: [PATCH 052/114] Fix documentation --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py 
b/cloud/amazon/ec2.py index ad2f8f8f71b..6d47fa6ac32 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -208,7 +208,7 @@ options: volumes: version_added: "1.5" description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict. + - "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict." 
required: false default: null aliases: [] From baff1bf7f0b49e2b4bc9f2c0582a1d356df160d9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 13:16:28 -0400 Subject: [PATCH 053/114] Update choices and version_added for new gce.py param service_account_permissions --- cloud/google/gce.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 48536057637..251a3ee9e93 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -59,12 +59,13 @@ options: default: null aliases: [] service_account_permissions: - version_added: 1.5.1 + version_added: 2.0 description: - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) required: false default: null aliases: [] + choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"] pem_file: version_added: 1.5.1 description: From 207abb6f5c7e9d1d50dc52e0ee4cc04d192912fa Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Tue, 23 Jun 2015 14:08:43 -0400 Subject: [PATCH 054/114] Add ClassicLink settings to EC2_launchconfig --- cloud/amazon/ec2_lc.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 3c292377a58..6c5e2c1dd4c 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -116,6 +116,18 @@ options: default: false aliases: [] version_added: "1.8" + classic_link_vpc_id: + description: + - Id of ClassicLink enabled VPC + required: false + default: null + version_added: "2.0" + classic_link_vpc_security_groups" + description: + - A list of security group id’s with which to associate the ClassicLink VPC instances. 
+ required: false + default: null + version_added: "2.0" extends_documentation_fragment: aws """ @@ -184,6 +196,8 @@ def create_launch_config(connection, module): ramdisk_id = module.params.get('ramdisk_id') instance_profile_name = module.params.get('instance_profile_name') ebs_optimized = module.params.get('ebs_optimized') + classic_link_vpc_id = module.params.get('classic_link_vpc_id') + classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') bdm = BlockDeviceMapping() if volumes: @@ -206,10 +220,12 @@ def create_launch_config(connection, module): kernel_id=kernel_id, spot_price=spot_price, instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, + associate_public_ip_address=assign_public_ip, ramdisk_id=ramdisk_id, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, + classic_link_vpc_security_groups=classic_link_vpc_security_groups, + classic_link_vpc_id=classic_link_vpc_id, ) launch_configs = connection.get_all_launch_configurations(names=[name]) @@ -257,7 +273,9 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool') + assign_public_ip=dict(type='bool'), + classic_link_vpc_security_groups=dict(type='list'), + classic_link_vpc_id=dict(type='str') ) ) From c6f9e08282b7eefc2f7f2825df369d0099c2c3b2 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 13 Apr 2015 21:22:11 -0400 Subject: [PATCH 055/114] new vpc module. does not contain subnet or route table functionality. 
changed name to ec2_vpc_net refactored out IGW functionality --- cloud/amazon/ec2_vpc_net.py | 344 ++++++++++++++++++++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_net.py diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py new file mode 100644 index 00000000000..33c711e7683 --- /dev/null +++ b/cloud/amazon/ec2_vpc_net.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net +short_description: configure AWS virtual private clouds +description: + - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. +version_added: "2.0" +options: + name: + description: + - The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists. + required: yes + cidr_block: + description: + - The CIDR of the VPC + required: yes + aliases: [] + tenancy: + description: + - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. + required: false + default: default + dns_support: + description: + - Whether to enable AWS DNS support. + required: false + default: true + dns_hostnames: + description: + - Whether to enable AWS hostname support. 
+ required: false + default: true + dhcp_id: + description: + - the id of the DHCP options to use for this vpc + default: null + required: false + tags: + description: + - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different. + default: None + required: false + state: + description: + - The state of the VPC. Either absent or present. + default: present + required: false + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. + default: false + required: false +author: Jonathan Davila +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Create a VPC with dedicate tenancy and a couple of tags + +- ec2_vpc: + name: Module_dev2 + cidr_block: 170.10.0.0/16 + region: us-east-1 + tags: + new_vpc: ec2_vpc_module + this: works22 + tenancy: dedicated + +''' + + +import time +import sys + +try: + import boto + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError + + HAS_BOTO=True +except ImportError: + HAS_BOTO=False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + +def vpc_exists(module, vpc, name, cidr_block, multi): + """Returns True or False in regards to the existance of a VPC. When supplied + with a CIDR, it will check for matching tags to determine if it is a match + otherwise it will assume the VPC does not exist and thus return false. 
+ """ + exists=False + matched_vpc=None + + try: + matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if len(matching_vpcs) == 1 and not multi: + exists=True + matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0] + elif len(matching_vpcs) > 1 and not multi: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + 'CIDR block you specified. If you would like to create ' + 'the VPC anyways please pass True to the multi_ok param.' % len(matching_vpcs)) + + return exists, matched_vpc + +def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags): + """This returns True or False. Intended to run after vpc_exists. + It will check all the characteristics of the parameters passed and compare them + to the active VPC. If any discrepancy is found, it will report true, meaning that + the VPC needs to be update in order to match the specified state in the params. + """ + + update_dhcp=False + update_tags=False + dhcp_match=False + + try: + dhcp_list=vpc.get_all_dhcp_options() + + if dhcp_id is not None: + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + for opts in dhcp_list: + if (str(opts).split(':')[1] == dhcp_id) or has_default: + dhcp_match=True + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_match or (has_default and dhcp_id != 'default'): + update_dhcp=True + + if dns_hostnames and dns_support == False: + module.fail_json('In order to enable DNS Hostnames you must have DNS support enabled') + else: + + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. 
+ try: + vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support) + vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if tags: + try: + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + if not set(tags.items()).issubset(set(current_tags.items())): + update_tags=True + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return update_dhcp, update_tags + + +def update_vpc_tags(module, vpc, vpc_id, tags, name): + tags.update({'Name': name}) + try: + vpc.create_tags(vpc_id, tags) + updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return updated_tags + + +def update_dhcp_opts(module, vpc, vpc_id, dhcp_id): + try: + vpc.associate_dhcp_options(dhcp_id, vpc_id) + dhcp_list=vpc.get_all_dhcp_options() + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + for opts in dhcp_list: + vpc_dhcp=vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}) + matched=False + if opts == dhcp_id: + matched=True + return opts + + if matched == False: + return dhcp_id + +def main(): + argument_spec=ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str', default=None, required=True), + cidr_block=dict(type='str', default=None, required=True), + tenancy=dict(choices=['default', 'dedicated'], default='default'), + dns_support=dict(type='bool', default=True), + dns_hostnames=dict(type='bool', default=True), + dhcp_opts_id=dict(type='str', default=None, required=False), + tags=dict(type='dict', required=False, default=None), + state=dict(choices=['present', 'absent'], default='present'), + region=dict(type='str', required=True), + multi_ok=dict(type='bool', default=False) + ) + ) + + module = AnsibleModule( + 
argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='Boto is required for this module') + + name=module.params.get('name') + cidr_block=module.params.get('cidr_block') + tenancy=module.params.get('tenancy') + dns_support=module.params.get('dns_support') + dns_hostnames=module.params.get('dns_hostnames') + dhcp_id=module.params.get('dhcp_opts_id') + tags=module.params.get('tags') + state=module.params.get('state') + multi=module.params.get('multi_ok') + + changed=False + new_dhcp_opts=None + new_tags=None + update_dhcp=False + update_tags=False + + region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module) + + try: + vpc=boto.vpc.connect_to_region( + region, + **aws_connect_kwargs + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + already_exists, vpc_id=vpc_exists(module, vpc, name, cidr_block, multi) + + if already_exists: + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + if update_dhcp or update_tags: + changed=True + + try: + e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + dhcp_list=vpc.get_all_dhcp_options() + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + dhcp_opts=None + + try: + for opts in dhcp_list: + if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}): + dhcp_opts=opts + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_opts and has_default: + dhcp_opts='default' + + if state == 'present': + + if not changed and already_exists: + module.exit_json(changed=changed, vpc_id=vpc_id) + elif changed: + if update_dhcp: + dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + + 
module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags) + + if not already_exists: + try: + vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1] + vpc.create_tags(vpc_id, dict(Name=name)) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + + if update_dhcp: + new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags) + elif state == 'absent': + if already_exists: + changed=True + try: + vpc.delete_vpc(vpc_id) + module.exit_json(changed=changed, vpc_id=vpc_id) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " + "and/or ec2_vpc_rt modules to ensure the other components are absent." 
% e_msg) + else: + module.exit_json(msg="VPC is absent") +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 7dac96e41b8892cddb2682e11131251dacd40dd3 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 9 Mar 2015 18:52:24 -0400 Subject: [PATCH 056/114] iam certificate module boto import tweak style patch --- cloud/amazon/iam_cert.py | 294 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 cloud/amazon/iam_cert.py diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py new file mode 100644 index 00000000000..1f58be753c8 --- /dev/null +++ b/cloud/amazon/iam_cert.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: iam_cert +short_description: Manage server certificates for use on ELBs and CloudFront +description: + - Allows for the management of server certificates +version_added: "2.0" +options: + name: + description: + - Name of certificate to add, update or remove. + required: true + aliases: [] + new_name: + description: + - When present, this will update the name of the cert with the value passed here. + required: false + aliases: [] + new_path: + description: + - When present, this will update the path of the cert with the value passed here. 
+ required: false + aliases: [] + state: + description: + - Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified. + required: true + default: null + choices: [ "present", "absent" ] + aliases: [] + path: + description: + - When creating or updating, specify the desired path of the certificate + required: false + default: "/" + aliases: [] + cert_chain: + description: + - The path to the CA certificate chain in PEM encoded format. + required: false + default: null + aliases: [] + cert: + description: + - The path to the certificate body in PEM encoded format. + required: false + aliases: [] + key: + description: + - The path to the private key of the certificate in PEM encoded format. + dup_ok: + description: + - By default the module will not upload a certifcate that is already uploaded into AWS. If set to True, it will upload the certifcate as long as the name is unique. + required: false + default: False + aliases: [] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + + +requirements: [ "boto" ] +author: Jonathan I. 
Davila +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic server certificate upload +tasks: +- name: Upload Certifcate + iam_cert: + name: very_ssl + state: present + cert: somecert.pem + key: privcertkey + cert_chain: myverytrustedchain + +''' +import json +import sys +try: + import boto + import boto.iam + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + +def cert_meta(iam, name): + opath = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + path + ocert = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + certificate_body + ocert_id = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + server_certificate_id + upload_date = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + upload_date + exp = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + expiration + return opath, ocert, ocert_id, upload_date, exp + +def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok): + update=False + if any(ct in orig_cert_names for ct in [name, new_name]): + for i_name in [name, new_name]: + if i_name is None: + continue + + if cert is not None: + try: + c_index=orig_cert_names.index(i_name) + except NameError: + continue + else: + if orig_cert_bodies[c_index] == cert: + update=True + break + elif orig_cert_bodies[c_index] != cert: + module.fail_json(changed=False, msg='A cert with the name %s already exists and' + ' has a different certificate body associated' + ' 
with it. Certifcates cannot have the same name') + else: + update=True + break + elif cert in orig_cert_bodies and not dup_ok: + for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies): + if crt_body == cert: + module.fail_json(changed=False, msg='This certificate already' + ' exists under the name %s' % crt_name) + + return update + + +def cert_action(module, iam, name, cpath, new_name, new_path, state, + cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok): + if state == 'present': + update = dup_check(module, iam, name, new_name, cert, orig_cert_names, + orig_cert_bodies, dup_ok) + if update: + opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) + changed=True + if new_name and new_path: + iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif new_name and not new_path: + iam.update_server_cert(name, new_cert_name=new_name) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif not new_name and new_path: + iam.update_server_cert(name, new_path=new_path) + module.exit_json(changed=changed, name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + else: + changed=False + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, + msg='No new path or name specified. 
No changes made') + else: + changed=True + iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath) + opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif state == 'absent': + if name in orig_cert_names: + changed=True + iam.delete_server_cert(name) + module.exit_json(changed=changed, deleted_cert=name) + else: + changed=False + module.exit_json(changed=changed, msg='Certifcate with the name %s already absent' % name) + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict( + default=None, required=True, choices=['present', 'absent']), + name=dict(default=None, required=False), + cert=dict(default=None, required=False), + key=dict(default=None, required=False), + cert_chain=dict(default=None, required=False), + new_name=dict(default=None, required=False), + path=dict(default='/', required=False), + new_path=dict(default=None, required=False), + dup_ok=dict(default=False, required=False, choices=[False, True]) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[], + ) + + if not HAS_BOTO: + module.fail_json(msg="Boto is required for this module") + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + try: + iam = boto.iam.connection.IAMConnection( + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key, + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + name = module.params.get('name') + path = module.params.get('path') + new_name = module.params.get('new_name') + new_path = module.params.get('new_path') + cert_chain = module.params.get('cert_chain') + dup_ok = module.params.get('dup_ok') + if state == 'present': + cert = open(module.params.get('cert'), 'r').read().rstrip() + key = open(module.params.get('key'), 
'r').read().rstrip() + if cert_chain is not None: + cert_chain = open(module.params.get('cert_chain'), 'r').read() + else: + key=cert=chain=None + + orig_certs = [ctb['server_certificate_name'] for ctb in \ + iam.get_all_server_certs().\ + list_server_certificates_result.\ + server_certificate_metadata_list] + orig_bodies = [iam.get_server_certificate(thing).\ + get_server_certificate_result.\ + certificate_body \ + for thing in orig_certs] + if new_name == name: + new_name = None + if new_path == path: + new_path = None + + changed = False + try: + cert_action(module, iam, name, path, new_name, new_path, state, + cert, key, cert_chain, orig_certs, orig_bodies, dup_ok) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err), debug=[cert,key]) + + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() From 33533eb1560ded3de4f2402c0d2c076c09bad088 Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Tue, 23 Jun 2015 18:31:48 +0000 Subject: [PATCH 057/114] iam: use modern helper to allow sts previous implementation ignored the session token when present --- cloud/amazon/iam.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index a7d0fbeee5b..d2807a23b44 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -565,13 +565,10 @@ def main(): module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " "please specificy present or absent") - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - ) + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 
58ef71fc8467fb0f6786b200732bbc0eeb54a1ed Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Sun, 31 May 2015 19:03:35 -0400 Subject: [PATCH 058/114] add download ability to unarchive module --- files/unarchive.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/files/unarchive.py b/files/unarchive.py index 625989ffdfb..2efd48294a1 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -32,6 +32,7 @@ options: src: description: - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. + - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0) required: true default: null dest: @@ -81,6 +82,9 @@ EXAMPLES = ''' # Unarchive a file that is already on the remote machine - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no + +# Unarchive a file that needs to be downloaded +- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no ''' import re @@ -269,6 +273,25 @@ def main(): if not os.path.exists(src): if copy: module.fail_json(msg="Source '%s' failed to transfer" % src) + # If copy=false, and src= contains ://, try and download the file to a temp directory. 
+ elif '://' in src: + tempdir = os.path.dirname(__file__) + package = os.path.join(tempdir, str(src.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, src) + f = open(package, 'w') + # Read 1kb at a time to save on ram + while True: + data = rsp.read(1024) + + if data == "": + break # End of file, break while loop + + f.write(data) + f.close() + src = package + except Exception, e: + module.fail_json(msg="Failure downloading %s, %s" % (src, e)) else: module.fail_json(msg="Source '%s' does not exist" % src) if not os.access(src, os.R_OK): @@ -315,5 +338,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * if __name__ == '__main__': main() From 56d4f21c5f7086a3788844a391043d6748e6ce93 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Fri, 19 Jun 2015 14:43:40 +0200 Subject: [PATCH 059/114] Use aws connect calls that allow boto profile use --- cloud/amazon/iam.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index c1d5ef70901..bda953faab4 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -146,6 +146,7 @@ import sys try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False From 29b00ba526d18edd5a0bf2e94d8f6a55ef7ec85b Mon Sep 17 00:00:00 2001 From: zimbatm Date: Tue, 5 May 2015 16:07:18 +0100 Subject: [PATCH 060/114] route53: add support for routing policies It is now possible to pass various routing policies if an identity is provided. 
This commit also introduces multiple optimisations: * Only fetch records for the given domain * Use UPSERT instead of DELETE+CREATE to update existing records --- cloud/amazon/route53.py | 133 ++++++++++++++++++++++++++++++---------- 1 file changed, 101 insertions(+), 32 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index d25be6b99ea..c2ad603a1f4 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -93,6 +93,45 @@ options: required: false default: false version_added: "1.9" + identifier: + description: + - Weighted and latency-based resource record sets only. An identifier + that differentiates among multiple resource record sets that have the + same combination of DNS name and type. + required: false + default: null + version_added: "2.0" + weight: + description: + - Weighted resource record sets only. Among resource record sets that + have the same combination of DNS name and type, a value that + determines what portion of traffic for the current resource record set + is routed to the associated location. + required: false + default: null + version_added: "2.0" + region: + description: + - Latency-based resource record sets only Among resource record sets + that have the same combination of DNS name and type, a value that + determines which region this should be associated with for the + latency-based routing + required: false + default: null + version_added: "2.0" + health_check: + description: + - Health check to associate with this record + required: false + default: null + version_added: "2.0" + failover: + description: + - Failover resource record sets only. Whether this is the primary or + secondary resource record set. 
+ required: false + default: null + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' @@ -156,6 +195,18 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Use a routing policy to distribute traffic: +- route53: + command: "create" + zone: "foo.com" + record: "www.foo.com" + type: "CNAME" + value: "host1.foo.com" + ttl: 30 + # Routing policy + identifier: "host1@www" + weight: 100 + health_check: "d994b780-3150-49fd-9205-356abdd42e75" ''' @@ -166,11 +217,21 @@ try: import boto.ec2 from boto import route53 from boto.route53 import Route53Connection - from boto.route53.record import ResourceRecordSets + from boto.route53.record import Record, ResourceRecordSets HAS_BOTO = True except ImportError: HAS_BOTO = False +def get_zone_by_name(conn, module, zone_name, want_private): + """Finds a zone by name""" + for zone in conn.get_zones(): + # only save this zone id if the private status of the zone matches + # the private_zone_in boolean specified in the params + private_zone = module.boolean(zone.config.get('PrivateZone', False)) + if private_zone == want_private and zone.name == zone_name: + return zone + return None + def commit(changes, retry_interval): """Commit changes, but retry PriorRequestNotComplete errors.""" @@ -200,6 +261,11 @@ def main(): overwrite = dict(required=False, type='bool'), retry_interval = dict(required=False, default=500), private_zone = dict(required=False, type='bool', default=False), + identifier = dict(required=False), + weight = dict(required=False, type='int'), + region = dict(required=False), + health_check = dict(required=False), + failover = dict(required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -217,6 +283,11 @@ def main(): alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') retry_interval_in = module.params.get('retry_interval') private_zone_in = module.params.get('private_zone') + identifier_in = 
module.params.get('identifier') + weight_in = module.params.get('weight') + region_in = module.params.get('region') + health_check_in = module.params.get('health_check') + failover_in = module.params.get('failover') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -249,32 +320,34 @@ def main(): except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) - # Get all the existing hosted zones and save their ID's - zones = {} - results = conn.get_all_hosted_zones() - for r53zone in results['ListHostedZonesResponse']['HostedZones']: - # only save this zone id if the private status of the zone matches - # the private_zone_in boolean specified in the params - if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # Find the named zone ID + zone = get_zone_by_name(conn, module, zone_in, private_zone_in) # Verify that the requested zone is already defined in Route53 - if not zone_in in zones: + if zone is None: errmsg = "Zone %s does not exist in Route53" % zone_in module.fail_json(msg = errmsg) record = {} found_record = False - sets = conn.get_all_rrsets(zones[zone_in]) + wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in, + identifier=identifier_in, weight=weight_in, region=region_in, + health_check=health_check_in, failover=failover_in) + for v in value_list: + if alias_in: + wanted_rset.set_alias(alias_hosted_zone_id_in, v) + else: + wanted_rset.add_value(v) + + sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in) for rset in sets: # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round # tripping of things like * and @. 
decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name.lower() == record_in.lower(): + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: found_record = True record['zone'] = zone_in record['type'] = rset.type @@ -282,6 +355,11 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + record['identifier'] = rset.identifier + record['weight'] = rset.weight + record['region'] = rset.region + record['failover'] = rset.failover + record['health_check'] = rset.health_check if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name @@ -291,8 +369,9 @@ def main(): record['alias'] = False record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': + if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml(): module.exit_json(changed=False) + break if command_in == 'get': module.exit_json(changed=False, set=record) @@ -300,26 +379,16 @@ def main(): if command_in == 'delete' and not found_record: module.exit_json(changed=False) - changes = ResourceRecordSets(conn, zones[zone_in]) - - if command_in == 'create' and found_record: - if not module.params['overwrite']: - module.fail_json(msg = "Record already exists with different value. 
Set 'overwrite' to replace it") - else: - change = changes.add_change("DELETE", record_in, type_in, record['ttl']) - for v in record['values']: - if record['alias']: - change.set_alias(record['alias_hosted_zone_id'], v) - else: - change.add_value(v) + changes = ResourceRecordSets(conn, zone.id) if command_in == 'create' or command_in == 'delete': - change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) - for v in value_list: - if module.params['alias']: - change.set_alias(alias_hosted_zone_id_in, v) - else: - change.add_value(v) + if command_in == 'create' and found_record: + if not module.params['overwrite']: + module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") + command = 'UPSERT' + else: + command = command_in.upper() + changes.add_change_record(command, wanted_rset) try: result = commit(changes, retry_interval_in) From 559ad374f573c0dda4c5ecb4cbc7d19a731e9524 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Fri, 19 Jun 2015 17:06:51 +0200 Subject: [PATCH 061/114] Add the option to pass a string as policy --- cloud/amazon/iam_policy.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..32a25ae2517 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -40,7 +40,12 @@ options: aliases: [] policy_document: description: - - The path to the properly json formatted policy file + - The path to the properly json formatted policy file (mutually exclusive with C(policy_json)) + required: false + aliases: [] + policy_json: + description: + - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly) required: false aliases: [] state: @@ -109,6 +114,19 @@ task: state: present with_items: new_groups.results +# Create a new S3 policy 
with prefix per user +tasks: +- name: Create S3 policy from template + iam_policy: + iam_type: user + iam_name: "{{ item.user }}" + policy_name: "s3_limited_access_{{ item.s3_user_prefix }}" + state: present + policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " + with_items: + - user: s3_user + prefix: s3_user_prefix + ''' import json import urllib @@ -271,6 +289,7 @@ def main(): iam_name=dict(default=None, required=False), policy_name=dict(default=None, required=True), policy_document=dict(default=None, required=False), + policy_json=dict(type='str', default=None, required=False), skip_duplicates=dict(type='bool', default=True, required=False) )) @@ -284,10 +303,19 @@ def main(): name = module.params.get('iam_name') policy_name = module.params.get('policy_name') skip = module.params.get('skip_duplicates') + + if module.params.get('policy_document') != None and module.params.get('policy_json') != None: + module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set') + if module.params.get('policy_document') != None: with open(module.params.get('policy_document'), 'r') as json_data: pdoc = json.dumps(json.load(json_data)) json_data.close() + elif module.params.get('policy_json') != None: + try: + pdoc = json.dumps(json.loads(module.params.get('policy_json'))) + except Exception as e: + module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json')) else: pdoc=None From c5324f54e61913a1573b1930fd599921c02319bc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 06:48:57 -0700 Subject: [PATCH 062/114] Bump amount of file to download in a chunk to 64k. 
--- files/unarchive.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index 647c218460e..8053991b63d 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -94,6 +94,9 @@ from zipfile import ZipFile # String from tar that shows the tar contents are different from the # filesystem DIFFERENCE_RE = re.compile(r': (.*) differs$') +# When downloading an archive, how much of the archive to download before +# saving to a tempfile (64k) +BUFSIZE = 65536 class UnarchiveError(Exception): pass @@ -282,7 +285,7 @@ def main(): f = open(package, 'w') # Read 1kb at a time to save on ram while True: - data = rsp.read(1024) + data = rsp.read(BUFSIZE) if data == "": break # End of file, break while loop From 4519dd5f4d8fb1787bd81c56403b5fab02075dae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 06:51:00 -0700 Subject: [PATCH 063/114] Small cleanups. * Import url(lib|parse|lib2) if needed by the module rather than relying on module_utils.urls to do so. 
* Remove stdlib modules from requirements * Use the if __name__ conditional for invoking main() --- network/basics/get_url.py | 7 +++++-- packaging/os/rpm_key.py | 6 ++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 074bf8bb484..f7ea5008cee 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -113,7 +113,7 @@ options: - all arguments accepted by the M(file) module also work here required: false # informational: requirements for nodes -requirements: [ urllib2, urlparse ] +requirements: [ ] author: "Jan-Piet Mens (@jpmens)" ''' @@ -125,6 +125,8 @@ EXAMPLES=''' get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c ''' +import urlparse + try: import hashlib HAS_HASHLIB=True @@ -315,4 +317,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index 1b38da3823b..d2d5e684015 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -60,9 +60,10 @@ EXAMPLES = ''' # Example action to ensure a key is not present in the db - rpm_key: state=absent key=DEADB33F ''' +import re import syslog import os.path -import re +import urllib2 import tempfile def is_pubkey(string): @@ -203,4 +204,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() From 73d5a8a63a9f250da0c867fb2efb927b3b91c183 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Wed, 24 Jun 2015 11:05:37 -0400 Subject: [PATCH 064/114] Fixing typo --- cloud/amazon/ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 6c5e2c1dd4c..818e8efbb50 100644 --- 
a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -122,7 +122,7 @@ options: required: false default: null version_added: "2.0" - classic_link_vpc_security_groups" + classic_link_vpc_security_groups: description: - A list of security group id’s with which to associate the ClassicLink VPC instances. required: false From dba3bc75399ce520de6eb96b0a93c829114580d9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 08:12:49 -0700 Subject: [PATCH 065/114] Read the url in in chunks so that we don't use as much memory for large packages --- packaging/os/yum.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 22e7ca4ad71..14339b4c18b 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -152,6 +152,9 @@ EXAMPLES = ''' yum: name="@Development tools" state=present ''' +# 64k. Number of bytes to read at a time when manually downloading pkgs via a url +BUFSIZE = 65536 + def_qf = "%{name}-%{version}-%{release}.%{arch}" def log(msg): @@ -526,9 +529,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) try: rsp, info = fetch_url(module, pkg) - data = rsp.read() f = open(package, 'w') - f.write(data) + data = rsp.read(BUFSIZE) + while data: + f.write(data) + data = rsp.read(BUFSIZE) f.close() pkg = package except Exception, e: From dbed8cee3bf81b0482ed6ba611e45fd6f73a5381 Mon Sep 17 00:00:00 2001 From: Jay Taylor Date: Fri, 24 Apr 2015 14:26:37 -0700 Subject: [PATCH 066/114] Added support for spot request type specification (to support persistent spot requests). 
--- cloud/amazon/ec2.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6d47fa6ac32..6fb6f4a5417 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -76,6 +76,13 @@ options: required: false default: null aliases: [] + spot_type: + description: + - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied. + required: false + default: "one-time" + choices: [ "one-time", "persistent" ] + aliases: [] image: description: - I(ami) ID to use for the instance @@ -783,6 +790,7 @@ def create_instances(module, ec2, vpc, override_count=None): instance_type = module.params.get('instance_type') tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') + spot_type = module.params.get('spot_type') image = module.params.get('image') if override_count: count = override_count @@ -976,6 +984,7 @@ def create_instances(module, ec2, vpc, override_count=None): params.update(dict( count = count_remaining, + type = spot_type, )) res = ec2.request_spot_instances(spot_price, **params) @@ -1220,6 +1229,7 @@ def main(): zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), spot_price = dict(), + spot_type = dict(default='one-time'), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 6611ee34a59c9b048d68933644dd0a2f1039574a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 11:23:34 -0700 Subject: [PATCH 067/114] Fix for when the password file did not exist previously --- web_infrastructure/htpasswd.py | 45 ++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 274f8fa38b2..bfb525b67eb 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -78,6 +78,7 @@ EXAMPLES = """ import os +import tempfile from distutils.version import StrictVersion try: 
@@ -199,28 +200,34 @@ def main(): module.fail_json(msg="This module requires the passlib Python library") # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. - f = open(path, "r") try: - lines=f.readlines() - finally: - f.close - - # If the file gets edited, it returns true, so only edit the file if it has blank lines - strip = False - for line in lines: - if not line.strip(): - strip = True - - if strip: - # If check mode, create a temporary file - if check_mode: - temp = tempfile.NamedTemporaryFile() - path = temp.name - f = open(path,"w") + f = open(path, "r") + except IOError: + # No preexisting file to remove blank lines from + f = None + else: try: - [f.write(line) for line in lines if line.strip() ] + lines = f.readlines() finally: - f.close + f.close() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path, "w") + try: + [ f.write(line) for line in lines if line.strip() ] + finally: + f.close() try: if state == 'present': From 65c63b3afa9343b65ea3c919b632443cd5e7eade Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 24 Jun 2015 14:43:04 -0400 Subject: [PATCH 068/114] Updating version_added for new spot_type param in ec2 module Also made sure 'choices' were set on the module param, to catch errors in user's playbooks, etc. --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6fb6f4a5417..b79395fb3a1 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -77,6 +77,7 @@ options: default: null aliases: [] spot_type: + version_added: "2.0" description: - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied. 
required: false @@ -1229,7 +1230,7 @@ def main(): zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), spot_price = dict(), - spot_type = dict(default='one-time'), + spot_type = dict(default='one-time', choices=["one-time", "persistent"]), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 00322c43fc7095e926fff25837343cb700b6a9a2 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Fri, 14 Nov 2014 16:07:29 -0800 Subject: [PATCH 069/114] Add support for listing keys in a specific S3 bucket Includes support for specifying a prefix, marker, and/or max_keys. Returns a list of key names (as strings). --- cloud/amazon/s3.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 9bec312294a..ecf35d00f5d 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -64,7 +64,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). 
required: true default: null aliases: [] @@ -129,6 +129,12 @@ EXAMPLES = ''' # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' +# List keys simple +- s3: bucket=mybucket mode=list + +# List keys all options +- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 + # Create an empty bucket - s3: bucket=mybucket mode=create @@ -204,6 +210,19 @@ def create_bucket(module, s3, bucket, location=None): if bucket: return True +def get_bucket(module, s3, bucket): + try: + return s3.lookup(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def list_keys(module, bucket_object, prefix, marker, max_keys): + all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) + + keys = map((lambda x: x.key), all_keys) + + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def delete_bucket(module, s3, bucket): try: bucket = s3.lookup(bucket) @@ -329,11 +348,14 @@ def main(): dest = dict(default=None), encrypt = dict(default=True, type='bool'), expiry = dict(default=600, aliases=['expiration']), + marker = dict(default=None), + max_keys = dict(default=1000), metadata = dict(type='dict'), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), + prefix = dict(default=None), retries = dict(aliases=['retry'], type='int', default=0), s3_url = dict(aliases=['S3_URL']), src = dict(), @@ -349,11 +371,14 @@ def main(): expiry = int(module.params['expiry']) if module.params.get('dest'): dest = os.path.expanduser(module.params.get('dest')) + marker = module.params.get('marker') + max_keys = 
module.params.get('max_keys') metadata = module.params.get('metadata') mode = module.params.get('mode') obj = module.params.get('object') version = module.params.get('version') overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') retries = module.params.get('retries') s3_url = module.params.get('s3_url') src = module.params.get('src') @@ -537,6 +562,16 @@ def main(): else: module.fail_json(msg="Bucket parameter is required.", failed=True) + # Support for listing a set of keys + if mode == 'list': + bucket_object = get_bucket(module, s3, bucket) + + # If the bucket does not exist then bail out + if bucket_object is None: + module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True) + + list_keys(module, bucket_object, prefix, marker, max_keys) + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. 
if mode == 'create': From 50912c9092eb567c5dc61c47eecd2ccc585ae364 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 16:32:47 -0700 Subject: [PATCH 070/114] Fix apt_repository so that it does not modify the mode of existing repositories --- packaging/os/apt_repository.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 496f5c5e269..eee58f77729 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -126,6 +126,8 @@ class InvalidSource(Exception): class SourcesList(object): def __init__(self): self.files = {} # group sources by file + # Repositories that we're adding -- used to implement mode param + self.new_repos = set() self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') # read sources.list if it exists @@ -257,8 +259,9 @@ class SourcesList(object): module.atomic_move(tmp_path, filename) # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(filename, this_mode, False) + if filename in self.new_repos: + this_mode = module.params['mode'] + module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): @@ -300,6 +303,7 @@ class SourcesList(object): files = self.files[file] files.append((len(files), True, True, source_new, comment_new)) + self.new_repos.add(file) def add_source(self, line, comment='', file=None): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] From 964d73172207df628cc7e8cfa9c9782d28f93fa9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 08:29:42 -0700 Subject: [PATCH 071/114] Add version_added documentation to log_driver parameter --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index c50f5f53e32..9986c94f9ec 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ 
-108,6 +108,7 @@ options: - json-file - none - syslog + version_added: "2.0" memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable From bed420cd531c30c0865bf331c74035494b612a1e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 25 Jun 2015 12:19:20 -0400 Subject: [PATCH 072/114] Update os_keypair for latest shade Uses the latest version of shade for cleaner code. Also, always return the key dict whether we create the key, or it already exists. The example using public_key_file is corrected to use a full path since ~ is not converted for us. --- cloud/openstack/os_keypair.py | 80 +++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index b404e6cc02a..a9c2640628f 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -41,12 +41,14 @@ options: default: None public_key: description: - - The public key that would be uploaded to nova and injected to vm's upon creation + - The public key that would be uploaded to nova and injected into VMs + upon creation. required: false default: None public_key_file: description: - - Path to local file containing ssh public key. Mutually exclusive with public_key + - Path to local file containing ssh public key. Mutually exclusive + with public_key. required: false default: None state: @@ -63,7 +65,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key_file: ~/.ssh/id_rsa.pub + public_key_file: /home/me/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: @@ -73,16 +75,33 @@ EXAMPLES = ''' ''' +def _system_state_change(module, keypair): + state = module.params['state'] + if state == 'present' and not keypair: + return True + if state == 'absent' and keypair: + return True + return False + + def main(): argument_spec = openstack_full_argument_spec( name = dict(required=True), public_key = dict(default=None), public_key_file = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + state = dict(default='present', + choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs( mutually_exclusive=[['public_key', 'public_key_file']]) - module = AnsibleModule(argument_spec, **module_kwargs) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') state = module.params['state'] name = module.params['name'] @@ -90,44 +109,33 @@ def main(): if module.params['public_key_file']: public_key = open(module.params['public_key_file']).read() - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') + public_key = public_key.rstrip() try: cloud = shade.openstack_cloud(**module.params) + keypair = cloud.get_keypair(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, keypair)) if state == 'present': - for key in cloud.list_keypairs(): - if key.name == name: - if public_key and (public_key != key.public_key): - module.fail_json( - msg="Key name %s present but key hash not the same" - " as offered. Delete key first." 
% key.name - ) - else: - module.exit_json(changed=False, result="Key present") - try: - key = cloud.create_keypair(name, public_key) - except Exception, e: - module.exit_json( - msg="Error in creating the keypair: %s" % e.message - ) - if not public_key: - module.exit_json(changed=True, key=key.private_key) - module.exit_json(changed=True, key=None) + if keypair and keypair['name'] == name: + if public_key and (public_key != keypair['public_key']): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." % name + ) + else: + module.exit_json(changed=False, key=keypair) + + new_key = cloud.create_keypair(name, public_key) + module.exit_json(changed=True, key=new_key) elif state == 'absent': - for key in cloud.list_keypairs(): - if key.name == name: - try: - cloud.delete_keypair(name) - except Exception, e: - module.fail_json( - msg="Keypair deletion has failed: %s" % e.message - ) - module.exit_json(changed=True, result="deleted") - module.exit_json(changed=False, result="not present") + if keypair: + cloud.delete_keypair(name) + module.exit_json(changed=True) + module.exit_json(changed=False) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 280ccfbb78e5e80c6f820ec371a848ff48fe9913 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 09:28:39 -0700 Subject: [PATCH 073/114] Add note about redirects proxies Fixes #1574 --- network/basics/get_url.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index f7ea5008cee..64cd24b6d09 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -38,6 +38,8 @@ description: (see `setting the environment `_), or by using the use_proxy option. + - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that + your proxy environment for both protocols is correct. 
version_added: "0.6" options: url: From 892212b9c434bb4d3f8c0a788f9155284d50f209 Mon Sep 17 00:00:00 2001 From: Juan Picca Date: Thu, 12 Feb 2015 09:25:36 -0200 Subject: [PATCH 074/114] synchronize module: add partial option --- files/synchronize.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/files/synchronize.py b/files/synchronize.py index 7f934e4e6f4..761529742de 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -152,6 +152,12 @@ options: default: required: false version_added: "1.6" + partial: + description: + - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. + default: no + required: false + version_added: "1.9" notes: - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path @@ -237,6 +243,7 @@ def main(): rsync_timeout = dict(type='int', default=0), rsync_opts = dict(type='list'), ssh_args = dict(type='str'), + partial = dict(default='no', type='bool'), ), supports_check_mode = True ) @@ -254,6 +261,7 @@ def main(): compress = module.params['compress'] existing_only = module.params['existing_only'] dirs = module.params['dirs'] + partial = module.params['partial'] # the default of these params depends on the value of archive recursive = module.params['recursive'] links = module.params['links'] @@ -332,6 +340,9 @@ def main(): if rsync_opts: cmd = cmd + " " + " ".join(rsync_opts) + if partial: + cmd = cmd + " --partial" + changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" From 8ba96aaf4bb5c7e3534408be693ead01c4c49027 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:36:07 -0700 Subject: [PATCH 075/114] update documentation, adding new params --- cloud/amazon/s3.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index ecf35d00f5d..4edac74366b 100644 --- 
a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -56,6 +56,18 @@ options: required: false default: 600 aliases: [] + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + required: false + default: null + version_added: "2.0" + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + required: false + default: 1000 + version_added: "2.0" metadata: description: - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. @@ -64,7 +76,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object). required: true default: null aliases: [] @@ -73,6 +85,12 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode + required: false + default: null + version_added: "2.0" version: description: - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. 
From e90d02c35cfacda523eebdecdac14cc3194dc04d Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:37:17 -0700 Subject: [PATCH 076/114] iterate through all keys in a more pythonic manner --- cloud/amazon/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 4edac74366b..8c5221e3c1f 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -237,7 +237,7 @@ def get_bucket(module, s3, bucket): def list_keys(module, bucket_object, prefix, marker, max_keys): all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) - keys = map((lambda x: x.key), all_keys) + keys = [x.key for x in all_keys] module.exit_json(msg="LIST operation complete", s3_keys=keys) From d435d5ce0ae2597fdde4600dd07edbb8c9c4fdfe Mon Sep 17 00:00:00 2001 From: verm666 Date: Thu, 25 Jun 2015 10:56:29 -0700 Subject: [PATCH 077/114] This change is in response to issue #133. The original problem is: apt_repository.py connects to launchpad on every playbook run. In this patch apt_repository.py checks if required repository already exists or not. If no - ppa will be added, if yes - just skip actions. 
--- packaging/os/apt_repository.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index eee58f77729..8f6d18d09d5 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -378,6 +378,25 @@ class UbuntuSourcesList(SourcesList): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] self._remove_valid_source(source) + @property + def repos_urls(self): + _repositories = [] + for parsed_repos in self.files.values(): + for parsed_repo in parsed_repos: + enabled = parsed_repo[1] + source_line = parsed_repo[3] + + if not enabled: + continue + + if source_line.startswith('ppa:'): + source, ppa_owner, ppa_name = self._expand_ppa(i[3]) + _repositories.append(source) + else: + _repositories.append(source_line) + + return _repositories + def get_add_ppa_signing_key_callback(module): def _run_command(command): @@ -425,8 +444,13 @@ def main(): sources_before = sourceslist.dump() + if repo.startswith('ppa:'): + expanded_repo = sourceslist._expand_ppa(repo)[0] + else: + expanded_repo = repo + try: - if state == 'present': + if state == 'present' and expanded_repo not in sourceslist.repos_urls: sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) From 2206477b739f767215ae0dadf4dea6e5cf36168f Mon Sep 17 00:00:00 2001 From: Vladimir Martsul Date: Fri, 26 Jun 2015 01:40:58 +0600 Subject: [PATCH 078/114] Add "force" description Add "force" option description --- files/template.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/files/template.py b/files/template.py index 2feb599abdf..a1dc72c27bd 100644 --- a/files/template.py +++ b/files/template.py @@ -47,6 +47,14 @@ options: required: false default: "" version_added: "1.2" + force: + description: + - the default is C(yes), which will replace the remote file when contents + are different than the source. 
If C(no), the file will only be transferred + if the destination does not exist. + required: false + choices: [ "yes", "no" ] + default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." requirements: [] From a5bba2488f2775da8cb08f4b9fb3f3c5d230ad2b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 12:44:08 -0700 Subject: [PATCH 079/114] Update version_added to 2.0 for the partial option --- files/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 761529742de..abad5ad359f 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -157,7 +157,7 @@ options: - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. default: no required: false - version_added: "1.9" + version_added: "2.0" notes: - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path From 29e4a127e19fee326c5c698d249f6b9791b9e705 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 25 Jun 2015 17:11:38 -0500 Subject: [PATCH 080/114] Default net to 'bridge' in container diff This prevents an unnecessary reload when the `net` parameter is unspecified (i.e. almost always). 
--- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 9986c94f9ec..a6090c4b0c1 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1108,8 +1108,8 @@ class DockerManager(object): # NETWORK MODE - expected_netmode = self.module.params.get('net') or '' - actual_netmode = container['HostConfig']['NetworkMode'] + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' if actual_netmode != expected_netmode: self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) From 725a7b2f59a296467439edde5aab75dc9552e60d Mon Sep 17 00:00:00 2001 From: verm666 Date: Fri, 26 Jun 2015 05:49:59 -0700 Subject: [PATCH 081/114] unarchive: fix work with 0 bytes archives This change is in response to issue #1575 --- files/unarchive.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/files/unarchive.py b/files/unarchive.py index 8053991b63d..a3544253402 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -300,6 +300,16 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) + # skip working with 0 size archives + try: + if os.path.getsize(src) == 0: + res_args = { + 'changed': False + } + module.exit_json(**res_args) + except Exception, e: + module.fail_json(msg="Source '%s' not readable" % src) + # is dest OK to receive tar file? 
if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) From a81dea2b17428127b507888fd0c2fad59c1aca1e Mon Sep 17 00:00:00 2001 From: "Roetman, Victor" Date: Fri, 26 Jun 2015 14:50:29 -0400 Subject: [PATCH 082/114] apache2_module documentation update requires a2enmod and a2dismod --- web_infrastructure/apache2_module.py | 1 + 1 file changed, 1 insertion(+) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index ec9a8985e60..cb43ba9b0eb 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -35,6 +35,7 @@ options: choices: ['present', 'absent'] default: present +requirements: ["a2enmod","a2dismod"] ''' EXAMPLES = ''' From d651b4169133ed8ef17d63d0418f733061fc1a6d Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Fri, 26 Jun 2015 15:39:08 -0700 Subject: [PATCH 083/114] return health of instances and counts --- cloud/amazon/ec2_elb_lb.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 566db2d329a..9d626a98194 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -384,9 +384,33 @@ class ElbManager(object): 'hosted_zone_name': check_elb.canonical_hosted_zone_name, 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy + 'app_cookie_policy': app_cookie_policy, + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0 } + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [ dict({ + "instance_id": instance_state.instance_id, + "reason_code": instance_state.reason_code, + "state": instance_state.state, + }) for instance_state in self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] 
+ + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] =+ 1 + if check_elb.health_check: info['health_check'] = { 'target': check_elb.health_check.target, From 5e674ddcfc3ab317d2aa05fc84dfe206768cfdab Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Thu, 11 Dec 2014 08:21:03 -0700 Subject: [PATCH 084/114] include all launch config properties in the return make all properties available when registering the result which is useful when wanting to launch a stand-alone instance based upon an existing Launch Config. --- cloud/amazon/ec2_lc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 3c292377a58..592d179a02b 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -225,7 +225,8 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) + security_groups=result.security_groups, instance_type=result.instance_type, + result=result) def delete_launch_config(connection, module): From 6f6d7f5c18296e3fe84fa5aef674948753ff52ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:10:25 -0400 Subject: [PATCH 085/114] updated docs to clarify use of exclusive --- system/authorized_key.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index bb223acbe4d..9d944a7d724 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -34,7 +34,6 @@ options: - The username on the remote host whose 
authorized_keys file will be modified required: true default: null - aliases: [] key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) @@ -72,9 +71,11 @@ options: version_added: "1.4" exclusive: description: - - Whether to remove all other non-specified keys from the - authorized_keys file. Multiple keys can be specified in a single - key= string value by separating them by newlines. + - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys + can be specified in a single C(key) string value by separating them by newlines. + - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration + of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a + single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" From 2d3e93e55823d03891e1c6612e959ee785f17575 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:36:55 -0400 Subject: [PATCH 086/114] added doc to note that git the command line tool is required for this moduel to function fixes http://github.com/ansible/ansible/issues/11358 --- source_control/git.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 369430211f3..bc35c97da93 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -173,7 +173,8 @@ options: to be installed. The commit MUST be signed and the public key MUST be trusted in the GPG trustdb. - +requirements: + - git (the command line tool) notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. 
To avoid this prompt, From 5e82f7e11e0ac7cc7cdaeffb0787209afae79fb0 Mon Sep 17 00:00:00 2001 From: ToBeReplaced Date: Sat, 27 Jun 2015 11:41:01 -0600 Subject: [PATCH 087/114] Make ALL_IN_SCHEMA for tables affect views ALL TABLES is considered to include views, so we must check for reltypes 'r' and 'v', not just 'r'. This bug was introduced due to using a custom, backwards-compatible version of "ALL TABLES IN SCHEMA". --- database/postgresql/postgresql_privs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 10f2361bfb2..8fefd3de648 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -315,7 +315,7 @@ class Connection(object): query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" + WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] From 7b0b75ceedf526826ebf591709afea4c8fdde7bb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 29 Jun 2015 10:34:24 -0400 Subject: [PATCH 088/114] Fix dict syntax typo --- cloud/openstack/os_client_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index a12cd8fe65a..2c4af5c8c08 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -52,9 +52,9 @@ EXAMPLES = ''' def main(): - module = AnsibleModule({ + module = AnsibleModule(argument_spec=dict( clouds=dict(required=False, default=[]), - }) + )) p = module.params try: From 73390f8ecc4df506a04a0406a42ecbea7d57501b Mon Sep 17 00:00:00 2001 From: James Meickle Date: Mon, 29 Jun 2015 13:23:03 -0400 Subject: [PATCH 089/114] Change uri debug example --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 3de17c12d60..8095eaffe67 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -269,7 +269,7 @@ def url_filename(url): def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): # To debug - #httplib2.debug = 4 + #httplib2.debuglevel = 4 # Handle Redirects if redirects == "all" or redirects == "yes": From 692045f693665f810736d0e07782e62bb4fb1f2d Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Mon, 29 Jun 2015 14:15:23 -0400 Subject: [PATCH 090/114] update docs for cloudformation --- cloud/amazon/cloudformation.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index dee292aeba3..cccdd156f20 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -51,6 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" + Must give full path to the file, relative to the playbook. 
If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: [] @@ -115,6 +116,22 @@ EXAMPLES = ''' tags: Stack: "ansible-cloudformation" +# Basic role example +- name: launch ansible cloudformation example + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + # Removal example - name: tear down old deployment cloudformation: From e6fc129013b0dfd2873fad648a867cc87dc76cc6 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 14:49:13 -0400 Subject: [PATCH 091/114] Add a note about the return value. --- cloud/openstack/os_keypair.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index a9c2640628f..f485d7fd2fc 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,6 +33,10 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack +notes: + - The module returns a dictionary describing the keypair, with + keys including: id, name, public_key. A private_key entry may + also be included if a keypair was generated for you. 
options: name: description: From def5fdcb2123b8a0146fe8b94bf19f82db3248a5 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 29 Jun 2015 15:14:50 -0400 Subject: [PATCH 092/114] no_log to iam password --- cloud/amazon/iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index bda953faab4..df8f3423411 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -509,7 +509,7 @@ def main(): groups=dict(type='list', default=None, required=False), state=dict( default=None, required=True, choices=['present', 'absent', 'update']), - password=dict(default=None, required=False), + password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', From 5da9c6a1c77d40b4d52ac3ff9799e5bcb0ab3847 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 12:42:50 -0700 Subject: [PATCH 093/114] Add testing of docs to the core repo --- .travis.yml | 1 + test-docs.sh | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100755 test-docs.sh diff --git a/.travis.yml b/.travis.yml index 0e3a2af23b3..9a65ec487d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,3 +14,4 @@ script: - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . 
+ - ./test-docs.sh core diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +if [ $? -ne 0 ] ; then + exit $? +fi +make -C docsite From 7970924bd56e2bbd53f6588b023ca3497afc6ebb Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 15:55:15 -0400 Subject: [PATCH 094/114] Use newest documentation style for return value. --- cloud/openstack/os_keypair.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index f485d7fd2fc..7a0c1ca47a0 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,10 +33,6 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack -notes: - - The module returns a dictionary describing the keypair, with - keys including: id, name, public_key. A private_key entry may - also be included if a keypair was generated for you. options: name: description: @@ -78,6 +74,26 @@ EXAMPLES = ''' name: ansible_key ''' +RETURN = ''' +id: + description: Unique UUID. + returned: success + type: string +name: + description: Name given to the keypair. + returned: success + type: string +public_key: + description: The public key value for the keypair. + returned: success + type: string +private_key: + description: The private key value for the keypair. 
+ returned: Only when a keypair is generated for the user (e.g., when creating one + and a public key is not specified). + type: string +''' + def _system_state_change(module, keypair): state = module.params['state'] From 7edacf6b1c480099eabd6f9ad9ad21d056ac4053 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 13:20:15 -0700 Subject: [PATCH 095/114] Use module.fail_json() instead of sys.exit() --- cloud/amazon/iam_policy.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..26d65450ec9 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -112,13 +112,12 @@ task: ''' import json import urllib -import sys try: import boto import boto.iam + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False def boto_exception(err): '''generic error message handler''' @@ -278,6 +277,9 @@ def main(): argument_spec=argument_spec, ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + state = module.params.get('state').lower() iam_type = module.params.get('iam_type').lower() state = module.params.get('state') From ddc78c82a4db6e8ee8c377fc08178e16fafdbbf0 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Mon, 29 Jun 2015 14:06:50 -0700 Subject: [PATCH 096/114] Document auto_floating_ip argument --- cloud/openstack/os_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 78a46f78c04..959f39880f8 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -90,6 +90,11 @@ options: - Ensure instance has public ip however the cloud wants to do that required: false default: 'yes' + auto_floating_ip: + description: + - If the module should automatically assign a floating IP + required: false + default: 'yes' floating_ips: description: - list of valid floating IPs 
that pre-exist to assign to this node From 4da3a724f1d57f5e1fe7f29804d82d835cceb3a5 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Sun, 21 Jun 2015 23:51:14 +0200 Subject: [PATCH 097/114] Fix connection creation to allow usage of profiles with boto --- cloud/amazon/iam_policy.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 26d65450ec9..72e70221d29 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -115,6 +115,7 @@ import urllib try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -293,13 +294,10 @@ def main(): else: pdoc=None - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - ) + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 02ea210db9f60ab68b5ae4e18f7150f3e5993954 Mon Sep 17 00:00:00 2001 From: Andreas Reischuck Date: Sat, 27 Jun 2015 23:34:16 +0200 Subject: [PATCH 098/114] fixed win_file state=touch --- windows/win_file.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index 0f3c20ec8e3..f8416120abf 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -56,7 +56,7 @@ If ( $state -eq "touch" ) } Else { - echo $null > $file + echo $null > $path } $result.changed = $TRUE } From 4ef5a45347558349f0fa23e138bf18559dd9a672 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 05:08:38 -0700 Subject: [PATCH 099/114] Add version that the profilename param was added --- packaging/os/rhn_register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/rhn_register.py 
b/packaging/os/rhn_register.py index 4207acc8c28..b67b442aa22 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -61,6 +61,7 @@ options: - supply an profilename for use with registration required: False default: null + version_added: "2.0" channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. From 8deee99fcc72852e7275746c2793976790881d50 Mon Sep 17 00:00:00 2001 From: verm666 Date: Tue, 30 Jun 2015 08:14:30 -0700 Subject: [PATCH 100/114] unarchive: fix @bcoca's remarks, issue #1575 --- files/unarchive.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index a3544253402..3ee83de0dcd 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -303,10 +303,7 @@ def main(): # skip working with 0 size archives try: if os.path.getsize(src) == 0: - res_args = { - 'changed': False - } - module.exit_json(**res_args) + module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src) except Exception, e: module.fail_json(msg="Source '%s' not readable" % src) From edad5c80ffc49706d44c98ee449c436b352a8817 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 13:18:56 -0700 Subject: [PATCH 101/114] Few minor things from review of the pull request --- cloud/openstack/os_keypair.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 7a0c1ca47a0..73656883a76 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -56,7 +56,7 @@ options: - Should the resource be present or absent. 
choices: [present, absent] default: present -requirements: ["shade"] +requirements: [] ''' EXAMPLES = ''' @@ -163,4 +163,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From 02b6df3160e66f92ef0e0cea363bce9472ce94b5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 05:00:08 -0700 Subject: [PATCH 102/114] Fix indentation levels in os_keypair --- cloud/openstack/os_keypair.py | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 73656883a76..f62cc51bf64 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -32,30 +32,30 @@ short_description: Add/Delete a keypair from OpenStack extends_documentation_fragment: openstack version_added: "2.0" description: - - Add or Remove key pair from OpenStack + - Add or Remove key pair from OpenStack options: - name: - description: - - Name that has to be given to the key pair - required: true - default: None - public_key: - description: - - The public key that would be uploaded to nova and injected into VMs - upon creation. - required: false - default: None - public_key_file: - description: - - Path to local file containing ssh public key. Mutually exclusive - with public_key. + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected into VMs + upon creation. required: false default: None - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive + with public_key. 
+ required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present requirements: [] ''' From b00b3f2b3c57cae8131fb15abbd0ddb0f3515cfb Mon Sep 17 00:00:00 2001 From: verm666 Date: Wed, 1 Jul 2015 07:04:45 -0700 Subject: [PATCH 103/114] fix authorized_keys in check_mode This change is in response to issue #1515. Original pull request #1580. The original problem is: in authorized_key module you have no idea about users which will be created by Ansible at first run. I can propose next two ways to solve this problem: 1. Combine modules system/user.py and system/authorized_key.py in one module (so you will know everything about users in that module) 2. Use small workaround: add my commit and always provide 'path' parameter for authorized_key module during runs with --check option. --- system/authorized_key.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index bb223acbe4d..e52b4e7556a 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -138,7 +138,7 @@ import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ - + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): @@ -146,7 +146,7 @@ class keydict(dict): self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) + super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): @@ -154,7 +154,7 @@ class keydict(dict): def values(self): return [self[key] for key in self] def itervalues(self): - return (self[key] for key in self) + return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ @@ -168,6 +168,13 @@ def keyfile(module, user, write=False, 
path=None, manage_dir=True): :return: full path string to authorized_keys for user """ + if module.check_mode: + if path is None: + module.fail_json(msg="You must provide full path to key file in check mode") + else: + keysfile = path + return keysfile + try: user_entry = pwd.getpwnam(user) except KeyError, e: @@ -214,8 +221,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): return keysfile def parseoptions(module, options): - ''' - reads a string containing ssh-key options + ''' + reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict @@ -246,7 +253,7 @@ def parsekey(module, raw_key): 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', + 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] From 910728f6c3b49de97df9af2abc730ff589230754 Mon Sep 17 00:00:00 2001 From: Matthew Gilliard Date: Wed, 1 Jul 2015 12:07:27 +0100 Subject: [PATCH 104/114] Handle race condition in directory creation. If we try to make a directory, but someone else creates the directory at the same time as us, we don't need to raise that error to the user. They asked for the directory to exist, and now it does. This fixes the race condition which was causing that error to be raised, and closes #1648. --- files/file.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 55d3665028e..ba5afd6809f 100644 --- a/files/file.py +++ b/files/file.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import errno import shutil import stat import grp @@ -280,7 +281,13 @@ def main(): if not os.path.isabs(path): curpath = curpath.lstrip('/') if not os.path.exists(curpath): - os.mkdir(curpath) + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. 
As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed) From 1b21e37fcbc135608b602bcc011bbcaeabd59ca3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 07:24:39 -0700 Subject: [PATCH 105/114] Disable travis docs checks --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9a65ec487d3..91d1b9585d7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,4 +14,4 @@ script: - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . - - ./test-docs.sh core + #- ./test-docs.sh core From 5a254e6303b82f8fe73e6ab7b1579ac0c8e36e14 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:22:50 -0500 Subject: [PATCH 106/114] Replace tabbed indentation with spaces for mysql_db module --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index e9a530811d4..c018ad143db 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -326,7 +326,7 @@ def main(): if state in ['dump','import']: if target is None: module.fail_json(msg="with state=%s target is required" % (state)) - if db == 'all': + if db == 'all': connect_to_db = 'mysql' db = 'mysql' all_databases = True From 9eb4219f79446c2302e346f6e4464ea2ead8626e Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:28 -0500 Subject: [PATCH 107/114] Replaced tabbed indentation with spaces for apt module --- packaging/os/apt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 09129a73fa5..9172c69763d 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -230,10 
+230,10 @@ def package_status(m, pkgname, version, cache, state): try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: - is_installed = False + is_installed = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: - package = provided_packages[0] + package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True From 4f43c4c09cf717b2cb0b59041f3e2da21cedf1a9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:51 -0500 Subject: [PATCH 108/114] Replaced tabbed indentation with spaces for subversion module --- source_control/subversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index e3ff6dbfba5..cae4702e174 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -121,7 +121,7 @@ class Subversion(object): def checkout(self): '''Creates new svn working directory if it does not already exist.''' self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - + def export(self, force=False): '''Export svn repo to directory''' cmd = ["export"] From b6b576abf6c2e73c8fd4a5308c0cfff00f6d300d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:01 -0500 Subject: [PATCH 109/114] Replaced tabbed indentation with spaces for group module --- system/group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/group.py b/system/group.py index d952cb5c28c..53ab5f904dc 100644 --- a/system/group.py +++ b/system/group.py @@ -121,7 +121,7 @@ class Group(object): if len(cmd) == 1: return (None, '', '') if self.module.check_mode: - return (0, '', '') + return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) From c2d0fbd45ba882c8a211f645e22e029d8c0b8b2a Mon Sep 17 
00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:23 -0500 Subject: [PATCH 110/114] Remove unnecessary imports in a docs only file for win_copy --- windows/win_copy.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index efdebc5a4a6..acc6c9ef2e0 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -18,8 +18,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import time DOCUMENTATION = ''' --- From 5c17a99a1cbb31d1b834f2f623e87d851ab2a140 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Wed, 1 Jul 2015 20:58:17 -0500 Subject: [PATCH 111/114] Upstream docs show launch_config_name as required. http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_AutoScalingGroup.html Fixes #11209 Ansible behavior is correct, this commit just updates the docs to reflect that correctness. --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 54d051375e6..eaeb141825e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -43,7 +43,7 @@ options: launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. 
- required: false + required: true min_size: description: - Minimum number of instances in group From 8f0d462fd0e966fbb04e4fbcf4685a2fd600fee0 Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Thu, 2 Jul 2015 06:16:43 -0700 Subject: [PATCH 112/114] remove double dict & fix increment bug --- cloud/amazon/ec2_elb_lb.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 9d626a98194..04be9e2813c 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -393,11 +393,11 @@ class ElbManager(object): # status of instances behind the ELB if info['instances']: - info['instance_health'] = [ dict({ - "instance_id": instance_state.instance_id, - "reason_code": instance_state.reason_code, - "state": instance_state.state, - }) for instance_state in self.elb_conn.describe_instance_health(self.name)] + info['instance_health'] = [ dict( + instance_id = instance_state.instance_id, + reason_code = instance_state.reason_code, + state = instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] else: info['instance_health'] = [] @@ -409,7 +409,7 @@ class ElbManager(object): elif instance_state['state'] == "OutOfService": info['out_of_service_count'] += 1 else: - info['unknown_instance_state_count'] =+ 1 + info['unknown_instance_state_count'] += 1 if check_elb.health_check: info['health_check'] = { From 93754b903f6956a86891197debb83f801b809200 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 14:43:21 -0400 Subject: [PATCH 113/114] updated upgrade to a more sensible default as the previous was prone to confusion fixes #1667 --- packaging/os/apt.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 9172c69763d..19a7c426f5e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -80,8 +80,8 @@ options: - 'Note: This does not upgrade a specific package, 
use state=latest for that.' version_added: "1.1" required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] + default: "no" + choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -548,7 +548,7 @@ def main(): default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), + upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], @@ -572,6 +572,10 @@ def main(): APT_GET_CMD = module.get_bin_path("apt-get") p = module.params + + if p['upgrade'] == 'no': + p['upgrade'] = None + if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") From c3c2e6ab726f9ea28a7a5d37b2a466740843bb9a Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Sat, 4 Jul 2015 14:45:21 -0400 Subject: [PATCH 114/114] Update cloudformation.py Fix for inaccurate phrasing --- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index cccdd156f20..abde0ec375c 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -51,7 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" - Must give full path to the file, relative to the playbook. 
If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" + Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: []