From cd8bfc7695b8bb2a6622afff04fd5d23752df815 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Thu, 6 Aug 2015 16:37:48 -0400 Subject: [PATCH 01/75] Adds tmp_dest option to get_url module. Addresses the issue in ansible/ansible#9512 --- network/basics/get_url.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 5e39887df7f..d0cc02408fe 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -55,6 +55,14 @@ options: If C(dest) is a directory, the file will always be downloaded (regardless of the force option), but replaced only if the contents changed. required: true + tmp_dest: + description: + - absolute path of where temporary file is downloaded to. + - Defaults to TMPDIR, TEMP or TMP env variables or a platform specific value + - https://docs.python.org/2/library/tempfile.html#tempfile.tempdir + required: false + default: '' + version_added: '2.0' force: description: - If C(yes) and C(dest) is not a directory, will download the file every @@ -163,7 +171,7 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None): +def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''): """ Download data from the url and store in a temporary file. @@ -179,7 +187,19 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head if info['status'] != 200: module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) - fd, tempname = tempfile.mkstemp() + if tmp_dest != '': + # tmp_dest should be an existing dir + tmp_dest_is_dir = os.path.isdir(tmp_dest) + if not tmp_dest_is_dir: + if os.path.exists(tmp_dest): + module.fail_json(msg="%s is a file but should be a directory." % tmp_dest) + else: + module.fail_json(msg="%s directory does not exist." % tmp_dest) + + fd, tempname = tempfile.mkstemp(dir=tmp_dest) + else: + fd, tempname = tempfile.mkstemp() + f = os.fdopen(fd, 'wb') try: shutil.copyfileobj(rsp, f) @@ -221,6 +241,7 @@ def main(): sha256sum = dict(default=''), timeout = dict(required=False, type='int', default=10), headers = dict(required=False, default=None), + tmp_dest = dict(required=False, default=''), ) module = AnsibleModule( @@ -235,7 +256,8 @@ def main(): sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] - + tmp_dest = os.path.expanduser(module.params['tmp_dest']) + # Parse headers to dict if module.params['headers']: try: @@ -279,7 +301,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) # Now the request has completed, we can finally generate the final # destination file name from the info dict.
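A minimal playbook sketch of the tmp_dest option introduced by the patch above; the task name, URL and paths are illustrative placeholders, not taken from the patch, and tmp_dest must name a directory that already exists or the module fails:

- name: Download a file, staging the temporary copy in an existing scratch directory
  get_url:
    url: http://example.com/path/to/file.conf
    dest: /etc/file.conf
    tmp_dest: /var/tmp/downloads

The usual motivation for setting tmp_dest is to stage the download on a filesystem with enough space, or on the same filesystem as dest, instead of the default TMPDIR location.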
From 8fa1e9515b125239617aa883b70ce6aaacefc8cc Mon Sep 17 00:00:00 2001 From: tobbe Date: Wed, 19 Aug 2015 22:42:49 +0200 Subject: [PATCH 02/75] Add support for selinux user when adding a new user on selinux enabled systems --- system/user.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/system/user.py b/system/user.py index 7e3e4c01cd3..1a9c1f07926 100644 --- a/system/user.py +++ b/system/user.py @@ -49,6 +49,10 @@ options: - Optionally when used with the -u option, this option allows to change the user ID to a non-unique value. version_added: "1.1" + seuser: + required: false + description: + - Optionally sets the seuser type (user_u). group: required: false description: @@ -254,6 +258,7 @@ class User(object): self.name = module.params['name'] self.uid = module.params['uid'] self.non_unique = module.params['non_unique'] + self.seuser = module.params['seuser'] self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] @@ -321,6 +326,9 @@ class User(object): if self.non_unique: cmd.append('-o') + if self.seuser is not None: + cmd.append('-Z') + cmd.append(self.seuser) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) @@ -2050,6 +2058,8 @@ def main(): shell=dict(default=None, type='str'), password=dict(default=None, type='str'), login_class=dict(default=None, type='str'), + # following options are specific to selinux + seuser=dict(default=None, type='str'), # following options are specific to userdel force=dict(default='no', type='bool'), remove=dict(default='no', type='bool'), From 30576ad0c7e411aaa2b78995c669f375d428b859 Mon Sep 17 00:00:00 2001 From: tobbe Date: Sat, 5 Sep 2015 14:39:52 +0200 Subject: [PATCH 03/75] add text to the description, more user friendly --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 1a9c1f07926..8303bed0d10 100644 --- a/system/user.py +++ b/system/user.py @@ -52,7 +52,7 @@ options: seuser: required: false description: - - Optionally sets the seuser type (user_u). + - Optionally sets the seuser type (user_u) on selinux enabled systems.
group: required: false description: From 83074ad501d84ebd224f782e141847562529a346 Mon Sep 17 00:00:00 2001 From: Mike Christofilopoulos Date: Thu, 1 Oct 2015 17:29:21 +0100 Subject: [PATCH 04/75] add new disks automatically when the 'vm_disk' section changes --- cloud/vmware/vsphere_guest.py | 72 ++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b8adb7930c3..85e184cc318 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python2 # -*- coding: utf-8 -*- # This file is part of Ansible @@ -702,11 +702,75 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo ) +def update_disks(vsphere_client, vm, module, vm_disk, changes): + request = VI.ReconfigVM_TaskRequestMsg() + changed = False + + for cnf_disk in vm_disk: + disk_id = re.sub("disk", "", cnf_disk) + found = False + for dev_key in vm._devices: + if vm._devices[dev_key]['type'] == 'VirtualDisk': + hdd_id = vm._devices[dev_key]['label'].split()[2] + if disk_id == hdd_id: + found = True + continue + if not found: + it = VI.ReconfigVM_TaskRequestMsg() + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + + spec = request.new_spec() + + dc = spec.new_deviceChange() + dc.Operation = "add" + dc.FileOperation = "create" + + hd = VI.ns0.VirtualDisk_Def("hd").pyclass() + hd.Key = -100 + hd.UnitNumber = int(disk_id) + hd.CapacityInKB = int(vm_disk[cnf_disk]['size_gb']) * 1024 * 1024 + hd.ControllerKey = 1000 + + # module.fail_json(msg="peos : %s" % vm_disk[cnf_disk]) + backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass() + backing.FileName = "[%s]" % vm_disk[cnf_disk]['datastore'] + backing.DiskMode = "persistent" + backing.Split = False + backing.WriteThrough = False + backing.ThinProvisioned = False + backing.EagerlyScrub = False + hd.Backing = backing + + dc.Device = hd + + spec.DeviceChange = [dc] + request.set_element_spec(spec) + + ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval + + # Wait for the task to finish + task = VITask(ret, vsphere_client) + status = task.wait_for_state([task.STATE_SUCCESS, + task.STATE_ERROR]) + + if status == task.STATE_SUCCESS: + changed = True + changes[cnf_disk] = vm_disk[cnf_disk] + elif status == task.STATE_ERROR: + module.fail_json( + msg="Error reconfiguring vm: %s, [%s]" % ( + task.get_error_message(), + vm_disk[cnf_disk])) + return changed, changes + + def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): spec = None changed = False changes = {} - request = VI.ReconfigVM_TaskRequestMsg() + request = None shutdown = False poweron = vm.is_powered_on() @@ -714,6 +778,10 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) + changed, changes = update_disks(vsphere_client, vm, + module, vm_disk, changes) + request = VI.ReconfigVM_TaskRequestMsg() + # Change Memory if 'memory_mb' in vm_hardware: From 8c9a9c0802f31a44a3fca401b027e9abea99da61 Mon Sep 17 00:00:00 2001 From: Mike Date: Thu, 1 Oct 2015 17:38:46 +0100 Subject: [PATCH 05/75] update_disks(): added origins of the code. 
--- cloud/vmware/vsphere_guest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 85e184cc318..f13eebbf2df 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -701,7 +701,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo msg="Could not clone selected machine: %s" % e ) - +# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py +# was used. def update_disks(vsphere_client, vm, module, vm_disk, changes): request = VI.ReconfigVM_TaskRequestMsg() changed = False From 9b04ca55f1526b57f90f5cf12e30c3920753480a Mon Sep 17 00:00:00 2001 From: Michael Fenn Date: Sat, 3 Oct 2015 14:31:22 -0400 Subject: [PATCH 06/75] Support cloning VMs into a specific VM folder The pysphere VIVirtualMachine.clone() method supports specifying a VM folder to place the VM in after the clone has completed. This exposes that functionality to playbooks. Also documents that creating VMs could always place VMs in a specific folder. --- cloud/vmware/vsphere_guest.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b8adb7930c3..7c3513a8d27 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -170,6 +170,7 @@ EXAMPLES = ''' vcpu.hotadd: yes mem.hotadd: yes notes: This is a test VM + folder: MyFolder vm_disk: disk1: size_gb: 10 @@ -241,6 +242,8 @@ EXAMPLES = ''' template_src: centosTemplate cluster: MainCluster resource_pool: "/Resources" + vm_extra_config: + folder: MyFolder # Task to gather facts from a vSphere cluster only if the system is a VMWare guest @@ -597,7 +600,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone, vm_extra_config): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -689,6 +692,10 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo cloneArgs["linked"] = True cloneArgs["snapshot"] = snapshot_to_clone + if vm_extra_config.get("folder") is not None: + # if a folder is specified, clone the VM into it + cloneArgs["folder"] = vm_extra_config.get("folder") + vmTemplate.clone(guest, **cloneArgs) changed = True else: @@ -1455,7 +1462,8 @@ def main(): module=module, cluster_name=cluster, snapshot_to_clone=snapshot_to_clone, - power_on_after_clone=power_on_after_clone + power_on_after_clone=power_on_after_clone, + vm_extra_config=vm_extra_config ) if state in ['restarted', 'reconfigured']: From d0b30dd86de218dc27449d42134b2487ab0b3880 Mon Sep 17 00:00:00 2001 From: Kai Webber Date: Tue, 6 Oct 2015 20:26:10 +0300 Subject: [PATCH 07/75] Added launch group support for ec2 module --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6572a9286f4..aed6d757a68 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -247,6 +247,13 @@ options: required: false default: null aliases: ['network_interface'] + spot_launch_group: + version_added: "2.0" + description: + - Launch group for spot request, see 
U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group) + required: false + default: null + aliases: [] author: - "Tim Gerla (@tgerla)" @@ -358,6 +365,7 @@ EXAMPLES = ''' wait: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes + spot_launch_group: report_generators # Examples using pre-existing network interfaces - ec2: @@ -858,6 +866,7 @@ def create_instances(module, ec2, vpc, override_count=None): source_dest_check = module.boolean(module.params.get('source_dest_check')) termination_protection = module.boolean(module.params.get('termination_protection')) network_interfaces = module.params.get('network_interfaces') + spot_launch_group = module.params.get('spot_launch_group') # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1040,6 +1049,9 @@ def create_instances(module, ec2, vpc, override_count=None): module.fail_json( msg="placement_group parameter requires Boto version 2.3.0 or higher.") + if spot_launch_group and isinstance(spot_launch_group, basestring): + params['launch_group'] = spot_launch_group + params.update(dict( count = count_remaining, type = spot_type, @@ -1304,6 +1316,7 @@ def main(): instance_type = dict(aliases=['type']), spot_price = dict(), spot_type = dict(default='one-time', choices=["one-time", "persistent"]), + spot_launch_group = dict(), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 3016d360f4accac987938f72c7d5c82acba3ae10 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Oct 2015 16:38:15 -0400 Subject: [PATCH 08/75] better handling of checkmode for solaris fixes #2296 --- system/user.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/system/user.py b/system/user.py index 499228953b2..c2e4956f897 100755 --- a/system/user.py +++ b/system/user.py @@ -1352,20 +1352,21 @@ class SunOS(User): cmd.append('-s') cmd.append(self.shell) - if self.module.check_mode: - return (0, '', '') - else: - # modify the user if cmd will do anything - if cmd_len != len(cmd): + # modify the user if cmd will do anything + if cmd_len != len(cmd): + (rc, out, err) = (0, '', '') + if not self.module.check_mode: cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') + else: + (rc, out, err) = (None, '', '') - # we have to set the password by editing the /etc/shadow file - if self.update_password == 'always' and self.password is not None and info[1] != self.password: + # we have to set the password by editing the /etc/shadow file + if self.update_password == 'always' and self.password is not None and info[1] != self.password: + (rc, out, err) = (0, '', '') + if not self.module.check_mode: try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): @@ -1382,7 +1383,7 @@ class SunOS(User): except Exception, err: self.module.fail_json(msg="failed to update users password: %s" % str(err)) - return (rc, out, err) + return (rc, out, err) # =========================================== class DarwinUser(User): From 1766c5082418d31d3c9444abc5df6a36937f3ed2 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 12:09:01 -0400 Subject: [PATCH 09/75] Update documentation to reflect need for mysql client --- database/mysql/mysql_db.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 
33720f5d4f6..9761271f058 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -85,12 +85,15 @@ notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this is as easy as yum install MySQL-python. (See M(yum).) + - Requires the mysql command line client. For Centos/Fedora, this is as easy as + yum install mariadb (See M(yum).). For Debian/Ubuntu this is as easy as + apt-get install mariadb-client. (See M(apt).) - Both I(login_password) and I(login_user) are required when you are passing credentials. If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. requirements: [ ConfigParser ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = ''' From b90318ae6ce84789d819f0a4a76a45937d4c3e8c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 29 Oct 2015 17:16:23 -0400 Subject: [PATCH 10/75] loop to get all load balancers, boto limited to 400 at a time fixes #2115 --- cloud/amazon/ec2_elb.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 9f333764a5d..7e383d1539d 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -50,10 +50,10 @@ options: choices: [ "yes", "no" ] wait: description: - - Wait for instance registration or deregistration to complete successfully before returning. + - Wait for instance registration or deregistration to complete successfully before returning. required: false default: yes - choices: [ "yes", "no" ] + choices: [ "yes", "no" ] validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. 
@@ -87,7 +87,7 @@ roles: - myrole post_tasks: - name: Instance Register - local_action: + local_action: module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" @@ -256,12 +256,23 @@ class ElbManager: ec2_elbs = self._get_auto_scaling_group_lbs() try: - elb = connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) + elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) - elbs = elb.get_all_load_balancers() + elbs = [] + marker = None + while True: + try: + newelbs = elb.get_all_load_balancers(marker=marker) + if not newelbs.is_truncated: + break + elbs.extend(newelbs) + marker = newelbs.next_marker + except TypeError: + # Older version of boto do not allow for params + elbs = elb.get_all_load_balancers() + break if ec2_elbs: lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) @@ -302,8 +313,7 @@ class ElbManager: def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: - ec2 = connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) + ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] @@ -330,7 +340,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: + if not region: module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") ec2_elbs = module.params['ec2_elbs'] @@ -342,8 +352,7 @@ def main(): module.fail_json(msg="ELBs are required for registration") instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, - region=region, **aws_connect_params) + elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) if ec2_elbs is not None: for elb in ec2_elbs: From 794cbeea231aabfdafaf3d5dfcdb706cec037afe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 2 Nov 2015 13:10:20 -0500 Subject: [PATCH 11/75] use marker instead of is_truncated which does not seem to work --- cloud/amazon/ec2_elb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 7e383d1539d..5b3b24dacc2 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -265,10 +265,10 @@ class ElbManager: while True: try: newelbs = elb.get_all_load_balancers(marker=marker) - if not newelbs.is_truncated: + marker = newelbs.next_marker + if not marker: break elbs.extend(newelbs) - marker = newelbs.next_marker except TypeError: # Older version of boto do not allow for params elbs = elb.get_all_load_balancers() From 76cd8381f8f6497a410370b6e7293b7787f7b48d Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 14:54:46 +0100 Subject: [PATCH 12/75] Absent unction was not working on user with login profile also fixed the exception handling --- cloud/amazon/iam.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8864cb10a6f..8f068a942c4 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -192,14 +192,18 @@ def create_user(module, iam, name, pwd, path, key_state, key_count): def delete_user(module, iam, name): + del_meta = '' try: current_keys = [ck['access_key_id'] for 
ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) + login_profile = iam.get_login_profiles(name) + if login_profile: + iam.delete_login_profile(name) del_meta = iam.delete_user(name).delete_user_response - except boto.exception.BotoServerError, err: - error_msg = boto_exception(err) + except Exception as ex: + module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) @@ -213,7 +217,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: - module.fail_json(changed=changed, msg=str(err)) + module.fail_json(changed=changed, msg=str(del_meta)) else: changed = True return del_meta, name, changed @@ -647,15 +651,20 @@ def main(): else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exit. No update made." % name) + elif state == 'absent': - if name in orig_user_list: - set_users_groups(module, iam, name, '') - del_meta, name, changed = delete_user(module, iam, name) - module.exit_json( - deletion_meta=del_meta, deleted_user=name, changed=changed) + if user_exists: + try: + set_users_groups(module, iam, name, '') + del_meta, name, changed = delete_user(module, iam, name) + module.exit_json(deleted_user=name, changed=changed, orig_user_list=orig_user_list) + + except Exception as ex: + module.fail_json(changed=changed, msg=str(ex)) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) @@ -687,9 +696,11 @@ def main(): if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) + elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) + elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(iam=iam, name=name) From 3d8f0b5d95691f76a3849f67b2d3f5bda4a908aa Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 15:34:48 +0100 Subject: [PATCH 13/75] fixed the delete user function now works with or without loginprofile (password) --- cloud/amazon/iam.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8f068a942c4..86c9723afe5 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -198,10 +198,16 @@ def delete_user(module, iam, name): iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) - login_profile = iam.get_login_profiles(name) - if login_profile: - iam.delete_login_profile(name) - del_meta = iam.delete_user(name).delete_user_response + try: + login_profile = iam.get_login_profiles(name).get_login_profile_response + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('Cannot find Login Profile') in error_msg: + + del_meta = iam.delete_user(name).delete_user_response + else: + iam.delete_login_profile(name) + del_meta = iam.delete_user(name).delete_user_response except Exception as ex: module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: @@ -661,7 +667,7 @@ def main(): try: set_users_groups(module, iam, name, '') del_meta, name, changed = delete_user(module, iam, name) - module.exit_json(deleted_user=name, changed=changed, orig_user_list=orig_user_list) + module.exit_json(deleted_user=name, changed=changed) except Exception as ex: module.fail_json(changed=changed, msg=str(ex)) From b655b6ae2683c12f660dcb4f9483496ef814a3c6 Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 15:39:34 +0100 Subject: [PATCH 14/75] typo --- cloud/amazon/iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 86c9723afe5..5aef25a2602 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -223,7 +223,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: - module.fail_json(changed=changed, msg=str(del_meta)) + module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True return del_meta, name, changed From dd26c37f6b4454ceaffe1b298f3626979c65cf2f Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 6 Nov 2015 17:43:24 +0000 Subject: [PATCH 15/75] Update ec2_elb_lb.py add connection draining default --- cloud/amazon/ec2_elb_lb.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 954f06496ae..8488b78a110 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -107,6 +107,7 @@ options: description: - Wait a specified timeout allowing connections to drain before terminating an instance required: false + default: "no" aliases: [] version_added: "1.8" idle_timeout: From 7d665db5e5ed20f036b28885c2b8f03c9285c631 Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 14:54:46 +0100 Subject: [PATCH 16/75] Squash of 3 commits for bugfix. 
Absent unction was not working on user with login profile also fixed the exception handling fixed the delete user function now works with or without loginprofile (password) typo --- cloud/amazon/iam.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8864cb10a6f..5aef25a2602 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -192,14 +192,24 @@ def create_user(module, iam, name, pwd, path, key_state, key_count): def delete_user(module, iam, name): + del_meta = '' try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) - del_meta = iam.delete_user(name).delete_user_response - except boto.exception.BotoServerError, err: - error_msg = boto_exception(err) + try: + login_profile = iam.get_login_profiles(name).get_login_profile_response + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('Cannot find Login Profile') in error_msg: + + del_meta = iam.delete_user(name).delete_user_response + else: + iam.delete_login_profile(name) + del_meta = iam.delete_user(name).delete_user_response + except Exception as ex: + module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) @@ -213,7 +223,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: - module.fail_json(changed=changed, msg=str(err)) + module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True return del_meta, name, changed @@ -647,15 +657,20 @@ def main(): else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exit. No update made." % name) + elif state == 'absent': - if name in orig_user_list: - set_users_groups(module, iam, name, '') - del_meta, name, changed = delete_user(module, iam, name) - module.exit_json( - deletion_meta=del_meta, deleted_user=name, changed=changed) + if user_exists: + try: + set_users_groups(module, iam, name, '') + del_meta, name, changed = delete_user(module, iam, name) + module.exit_json(deleted_user=name, changed=changed) + + except Exception as ex: + module.fail_json(changed=changed, msg=str(ex)) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) @@ -687,9 +702,11 @@ def main(): if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) + elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) + elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(iam=iam, name=name) From 2c95641d66d5ca0eac3bb95d7361a71eb89758d1 Mon Sep 17 00:00:00 2001 From: Steve Spencer Date: Wed, 11 Nov 2015 16:44:01 +0200 Subject: [PATCH 17/75] Add support for mounting host volumes with Z and z options --- cloud/docker/docker.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ecfb93b0c2..12e7851f910 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -80,7 +80,7 @@ options: volumes: description: - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw", "ro", "Z", "z".' default: null volumes_from: description: @@ -626,14 +626,14 @@ class DockerManager(object): # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) elif 2 <= len(parts) <= 3: # default to read-write - ro = False + mode = 'rw' # with supplied bind mode if len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + if parts[2] not in ['ro', 'rw', 'z', 'Z']: + self.module.fail_json(msg='bind mode needs to be one of "ro", "rw", "z", or "Z"') else: - ro = parts[2] == 'ro' - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + mode = parts[2] + self.binds[parts[0]] = {'bind': parts[1], 'mode': mode } else: self.module.fail_json(msg='volumes support 1 to 3 arguments') @@ -1197,10 +1197,7 @@ class DockerManager(object): for host_path, config in self.binds.iteritems(): if isinstance(config, dict): container_path = config['bind'] - if config['ro']: - mode = 'ro' - else: - mode = 'rw' + mode = config['mode'] else: container_path = config mode = 'rw' From fc5da26deeda577e8034b249caea6b7399b556d3 Mon Sep 17 00:00:00 2001 From: Steve Spencer Date: Thu, 12 Nov 2015 10:42:26 +0200 Subject: [PATCH 18/75] Sync up with allowable docker volume mounting modes --- cloud/docker/docker.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 12e7851f910..c6cf10f0783 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -79,8 +79,10 @@ options: version_added: "1.5" volumes: description: - - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw", "ro", "Z", "z".' + - List of volumes to mount within the container + - 'Use docker CLI-style syntax: C(/host:/container[:mode])' + - You can specify a read mode for the mount with either C(ro) or C(rw). SELinux hosts can additionally + use C(z) or C(Z) mount options to use a shared or private label for the volume. 
default: null volumes_from: description: @@ -629,8 +631,8 @@ class DockerManager(object): mode = 'rw' # with supplied bind mode if len(parts) == 3: - if parts[2] not in ['ro', 'rw', 'z', 'Z']: - self.module.fail_json(msg='bind mode needs to be one of "ro", "rw", "z", or "Z"') + if parts[2] not in ["rw", "rw,Z", "rw,z", "z,rw", "Z,rw", "Z", "z", "ro", "ro,Z", "ro,z", "z,ro", "Z,ro"]: + self.module.fail_json(msg='invalid bind mode ' + parts[2]) else: mode = parts[2] self.binds[parts[0]] = {'bind': parts[1], 'mode': mode } From e9c548da417f90b990aff3a2036abc52abb2bf37 Mon Sep 17 00:00:00 2001 From: Maarten Claes Date: Wed, 18 Nov 2015 13:12:59 +0100 Subject: [PATCH 19/75] This fixes copy with the remote_src option It was broken in 6e37f1dcef0 when the remote_src was added. Need to pass the absolute path to copy2 instead of a tuple. --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index 812b2d9ab7d..5dd1e9935e6 100644 --- a/files/copy.py +++ b/files/copy.py @@ -310,7 +310,7 @@ def main(): if rc != 0: module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) if remote_src: - tmpdest = tempfile.mkstemp(dir=os.basedir(dest)) + _, tmpdest = tempfile.mkstemp(dir=os.path.dirname(dest)) shutil.copy2(src, tmpdest) module.atomic_move(tmpdest, dest) else: From 4bc834485ae648596ad443d6527d77844c2c54e5 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Wed, 18 Nov 2015 19:45:32 +0000 Subject: [PATCH 20/75] Update ec2_elb_lb.py --- cloud/amazon/ec2_elb_lb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 8488b78a110..1d9b2db283e 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -107,7 +107,7 @@ options: description: - Wait a specified timeout allowing connections to drain before terminating an instance required: false - default: "no" + default: "None" aliases: [] version_added: "1.8" idle_timeout: From 2a306c6b8cb4beeacc06a9ba37311c96b17d3413 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 18 Nov 2015 14:55:17 -0500 Subject: [PATCH 21/75] Fix os_user_group module This module had a couple of errors in it. Also added check mode support. 
--- cloud/openstack/os_user_group.py | 51 +++++++++++++++++++------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_user_group.py b/cloud/openstack/os_user_group.py index 37b76933c38..b2be24c74b2 100644 --- a/cloud/openstack/os_user_group.py +++ b/cloud/openstack/os_user_group.py @@ -17,7 +17,6 @@ try: import shade - from shade import meta HAS_SHADE = True except ImportError: HAS_SHADE = False @@ -28,6 +27,7 @@ module: os_user_group short_description: Associate OpenStack Identity users and groups extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add and remove users from groups options: @@ -51,57 +51,66 @@ requirements: EXAMPLES = ''' # Add the demo user to the demo group -- os_user_group: user=demo group=demo +- os_user_group: + cloud: mycloud + user: demo + group: demo ''' -def main(): +def _system_state_change(state, in_group): + if state == 'present' and not in_group: + return True + if state == 'absent' and in_group: + return True + return False +def main(): argument_spec = openstack_full_argument_spec( - argument_spec = dict( user=dict(required=True), group=dict(required=True), state=dict(default='present', choices=['absent', 'present']), - )) + ) module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, **module_kwargs) + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - user = module.params.pop('user') - group = module.params.pop('group') - state = module.params.pop('state') + user = module.params['user'] + group = module.params['group'] + state = module.params['state'] try: - cloud = shade.openstack_cloud(**module.params) + cloud = shade.operator_cloud(**module.params) in_group = cloud.is_user_in_group(user, group) - if state == 'present': + if module.check_mode: + module.exit_json(changed=_system_state_change(state, in_group)) - if in_group: - changed = False - else: - cloud.add_user_to_group( - user_name_or_id=user, group_name_or_id=group) + changed = False + if state == 'present': + if not in_group: + cloud.add_user_to_group(user, group) changed = True + elif state == 'absent': if in_group: - cloud.remove_user_from_group( - user_name_or_id=user, group_name_or_id=group) + cloud.remove_user_from_group(user, group) changed=True - else: - changed=False + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message, extra_data=e.extra_data) + from ansible.module_utils.basic import * from ansible.module_utils.openstack import * - if __name__ == '__main__': main() From 062c7764e63e7ff59efe80cd1bf4887eb625ae3c Mon Sep 17 00:00:00 2001 From: Mike Riddle Date: Wed, 18 Nov 2015 15:10:15 -0500 Subject: [PATCH 22/75] Fixed error message: TypeError: fail_json() takes exactly 1 argument (2 given) --- cloud/amazon/iam_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index d5b3daa7d5e..7038612d88e 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -188,7 +188,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): # Role doesn't exist so it's safe to assume the policy doesn't either module.exit_json(changed=False) else: - module.fail_json(e.message) + module.fail_json(msg=e.message) try: for pol in current_policies: From 1d6b31a90f8569029afea5dc8e459529b8c976fb Mon Sep 17 00:00:00 2001 
From: Brian Coca Date: Thu, 19 Nov 2015 09:28:40 -0800 Subject: [PATCH 23/75] fixed break order --- cloud/amazon/ec2_elb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 5b3b24dacc2..4e19a054bd1 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -266,9 +266,9 @@ class ElbManager: try: newelbs = elb.get_all_load_balancers(marker=marker) marker = newelbs.next_marker + elbs.extend(newelbs) if not marker: break - elbs.extend(newelbs) except TypeError: # Older version of boto do not allow for params elbs = elb.get_all_load_balancers() From ade721cc5d3383dd78278041429db30f004a8e02 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 18 Nov 2015 17:52:05 -0500 Subject: [PATCH 24/75] Remove note about only using win_copy for small files. --- windows/win_copy.py | 10 ---------- 1 file changed, 10 deletions(-) mode change 100644 => 100755 windows/win_copy.py diff --git a/windows/win_copy.py b/windows/win_copy.py old mode 100644 new mode 100755 index acc6c9ef2e0..a222a928f09 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -44,16 +44,6 @@ options: required: true default: null author: "Jon Hawkesworth (@jhawkesworth)" -notes: - - The "win_copy" module is best used for small files only. - This module should **not** be used for files bigger than 3Mb as - this will result in a 500 response from the winrm host - and it will not be possible to connect via winrm again until the - windows remote management service has been restarted on the - windows host. - Files larger than 1Mb will take minutes to transfer. - The recommended way to transfer large files is using win_get_url - or collecting from a windows file share folder. ''' EXAMPLES = ''' From 13343a88881e5ac174213a8227c263c4d18e6c95 Mon Sep 17 00:00:00 2001 From: Chris Streeter Date: Thu, 19 Nov 2015 14:00:35 -0800 Subject: [PATCH 25/75] Fix name of ssh_opts arg --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index d42b284abc5..bdc87b034a2 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -55,7 +55,7 @@ options: version_added: "1.5" description: - if C(yes), adds the hostkey for the repo url if not already - added. If ssh_args contains "-o StrictHostKeyChecking=no", + added. If ssh_opts contains "-o StrictHostKeyChecking=no", this parameter is ignored. 
ssh_opts: required: false From 3b8147af3044ed9c5d628e2f92e4795688f742c2 Mon Sep 17 00:00:00 2001 From: Daniel Donckers Date: Fri, 20 Nov 2015 14:55:39 -0600 Subject: [PATCH 26/75] Fixes #822 --- cloud/amazon/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index ec4dc533005..72eac85b4fc 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -295,7 +295,7 @@ def main(): overwrite = dict(required=False, type='bool'), retry_interval = dict(required=False, default=500), private_zone = dict(required=False, type='bool', default=False), - identifier = dict(required=False), + identifier = dict(required=False, default=None), weight = dict(required=False, type='int'), region = dict(required=False), health_check = dict(required=False), @@ -391,7 +391,7 @@ def main(): #Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block rset.name = decoded_name - if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == str(identifier_in): + if rset.type == type_in and decoded_name.lower() == record_in.lower() and str(rset.identifier) == str(identifier_in): found_record = True record['zone'] = zone_in record['type'] = rset.type From 39ef6a1a80d0bc0c3c2b58d39fb8b59959baa17d Mon Sep 17 00:00:00 2001 From: Keith Hassen Date: Sun, 22 Nov 2015 21:53:21 -0500 Subject: [PATCH 27/75] Fail if any group name is not resolved to an ID. --- cloud/amazon/ec2.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ac2d58064be..b14c3c7d961 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -883,6 +883,9 @@ def create_instances(module, ec2, vpc, override_count=None): grp_details = ec2.get_all_security_groups() if isinstance(group_name, basestring): group_name = [group_name] + unmatched = list(set(group_name) - set([str(grp.name) for grp in grp_details])) + if len(unmatched) > 0: + module.fail_json(msg="the following group names are not valid: %s" % ','.join(unmatched)) group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: From 1bc0b6ee6a1f70a2e29c33e1f77414b19e2fb126 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Mon, 23 Nov 2015 22:51:08 +0900 Subject: [PATCH 28/75] create non-existent ini file fixing fail_json more verbose fail msg --- files/ini_file.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/ini_file.py b/files/ini_file.py index ce286741981..82d4621dfbb 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -97,6 +97,7 @@ EXAMPLES = ''' import ConfigParser import sys +import os # ============================================================== # do_ini @@ -104,6 +105,11 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): + if not os.path.exists(filename): + try: + open(filename,'w').close() + except: + module.fail_json(msg="Destination file %s not writable" % filename) ini_file = open(filename, 'r') try: ini_lines = ini_file.readlines() From d13741314ea3fceaef4ef69eab6b1a7b4da9e901 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 23 Nov 2015 11:29:00 -0500 Subject: [PATCH 29/75] Bug fix for os_image and min_disk/min_ram The min_disk and min_ram parameters were not being passed to the shade API. They also need to be integer values. 
Also updated the description of these parameters for better clarification. --- cloud/openstack/os_image.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 076ea806396..b83e98859e7 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -56,12 +56,12 @@ options: default: None min_disk: description: - - The minimum disk space required to deploy this image + - The minimum disk space (in GB) required to boot this image required: false default: None min_ram: description: - - The minimum ram required to deploy this image + - The minimum ram (in MB) required to boot this image required: false default: None is_public: @@ -125,8 +125,8 @@ def main(): disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']), owner = dict(default=None), - min_disk = dict(default=None), - min_ram = dict(default=None), + min_disk = dict(type='int', default=0), + min_ram = dict(type='int', default=0), is_public = dict(default=False), filename = dict(default=None), ramdisk = dict(default=None), @@ -156,6 +156,8 @@ def main(): wait=module.params['wait'], timeout=module.params['timeout'], is_public=module.params['is_public'], + min_disk=module.params['min_disk'], + min_ram=module.params['min_ram'] ) changed = True if not module.params['wait']: From 6bd8020f65928cda1dbebf8594d1ead8f439b9f2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Nov 2015 09:32:46 -0800 Subject: [PATCH 30/75] corrected version_added, removed empty alias --- cloud/amazon/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index eaaec90002a..cfd4991f526 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -248,12 +248,11 @@ options: default: null aliases: ['network_interface'] spot_launch_group: - version_added: "2.0" + version_added: "2.1" description: - Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group) required: false default: null - aliases: [] author: - "Tim Gerla (@tgerla)" From 490038b0e4a9db324bff9e5cc69e790c0ab52bb2 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Mon, 23 Nov 2015 23:42:40 +0000 Subject: [PATCH 31/75] Update documentation of the 'pkg' and 'state' parameters in yum. The yum module allows the 'name' parameter to be given as 'pkg', in a similar way to some of the other package managers. This change documents this alias. The module's 'state' parameter has two other aliases, in line with the 'apt' action; the 'state' parameter can take 'installed' as an alias for 'present', and 'removed' as an alias for 'absent'. These aliases are documented. --- packaging/os/yum.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index e1e3341a075..e0b598a410c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -51,7 +51,7 @@ options: - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages." 
required: true default: null - aliases: [] + aliases: [ 'pkg' ] exclude: description: - "Package name(s) to exclude when state=present, or latest" @@ -65,9 +65,9 @@ options: default: null state: description: - - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. + - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. required: false - choices: [ "present", "latest", "absent" ] + choices: [ "present", "installed", "latest", "absent", "removed" ] default: "present" enablerepo: description: From 8ae30f1822cb73ac54eb9e19d922d605a78ed098 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Nov 2015 16:23:41 -0800 Subject: [PATCH 32/75] Minor simplification of code --- cloud/amazon/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6dcc97bc5c8..e035e07af24 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -891,9 +891,9 @@ def create_instances(module, ec2, vpc, override_count=None): grp_details = ec2.get_all_security_groups() if isinstance(group_name, basestring): group_name = [group_name] - unmatched = list(set(group_name) - set([str(grp.name) for grp in grp_details])) + unmatched = set(group_name).difference(str(grp.name) for grp in grp_details) if len(unmatched) > 0: - module.fail_json(msg="the following group names are not valid: %s" % ','.join(unmatched)) + module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched)) group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: From fca36415d6df5f5877a2db72d3f97056ba9e2f65 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Tue, 24 Nov 2015 15:55:31 +0000 Subject: [PATCH 33/75] Update the documentation of the 'apt' action for the 'name'. The package name has two aliases, 'package' and 'pkg'. Add them to the documentation. --- packaging/os/apt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index b5c363ab1f5..3fe9c62c07d 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -32,6 +32,7 @@ options: - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding) required: false default: null + aliases: [ 'pkg', 'package' ] state: description: - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. 
From ab420300efa1cba981cb39049a9ca56ccc303420 Mon Sep 17 00:00:00 2001 From: joshuaeke Date: Tue, 24 Nov 2015 18:48:59 +0000 Subject: [PATCH 34/75] Update ec2.py remove state tag 'exact_count' and 'state' are mutually exclusive options they should not be in the following examples: - # Enforce that 5 running instances named "database" with a "dbtype" of "postgres" example and - # Enforce that 5 instances with a tag "foo" are running --- cloud/amazon/ec2.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index e035e07af24..04aa656d37e 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -488,7 +488,6 @@ EXAMPLES = ''' # - ec2: - state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 @@ -506,7 +505,6 @@ EXAMPLES = ''' # - ec2: - state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 From f1f201c234d91e186e56d4cc53fbb018d905c0a4 Mon Sep 17 00:00:00 2001 From: Max Rothman Date: Tue, 24 Nov 2015 14:26:17 -0500 Subject: [PATCH 35/75] Fix rds "promote" command never promoting Previously, the `promote` command in the `rds` module would always return OK and never actually promote an instance. This was because `promote_db_instance()` had its conditions backwards: if the instance had the `replication_source` attribute indicating that it **was** a replica, it would set `changed = False` and do nothing. If the instance **wasn't** a replica, it would attempt to run `boto.rds.promote_read_replica()`, which would always fail. --- cloud/amazon/rds.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index d8f5a2cea86..19f0bbe58d6 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -829,13 +829,17 @@ def promote_db_instance(module, conn): instance_name = module.params.get('instance_name') result = conn.get_db_instance(instance_name) + if not result: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + if result.get_data().get('replication_source'): - changed = False - else: try: result = conn.promote_read_replica(instance_name, **params) + changed = True except RDSException, e: module.fail_json(msg=e.message) + else: + changed = False if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) From 129bac3649c65137f3a821309de05dc85dae1dc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Andr=C3=A9?= Date: Wed, 25 Nov 2015 16:54:11 +0900 Subject: [PATCH 36/75] Fix typo in ping module short description --- system/ping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/ping.py b/system/ping.py index 1449cf5dca9..ed93f7dfe11 100644 --- a/system/ping.py +++ b/system/ping.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: ping version_added: historical -short_description: Try to connect to host, veryify a usable python and return C(pong) on success. +short_description: Try to connect to host, verify a usable python and return C(pong) on success. description: - A trivial test module, this module always returns C(pong) on successful contact. 
It does not make sense in playbooks, but it is useful from From b07ff99cace891084d3e978ddfd1953d0145cc0e Mon Sep 17 00:00:00 2001 From: Michel Alexandre Salim Date: Wed, 25 Nov 2015 15:08:48 +0700 Subject: [PATCH 37/75] Fix ec2_snapshot documentation last_snapshot_min_age is added in 2.0, not 1.9 --- cloud/amazon/ec2_snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index 09fa0d90389..a3200efe847 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -74,7 +74,7 @@ options: - If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created. required: false default: 0 - version_added: "1.9" + version_added: "2.0" author: "Will Thames (@willthames)" extends_documentation_fragment: From b2ace272a538bf6cd9cc9c2f3a43120438d55b2a Mon Sep 17 00:00:00 2001 From: Lippy Lee Date: Wed, 25 Nov 2015 22:10:31 +0800 Subject: [PATCH 38/75] Make digital_ocean_domain use API v2 --- cloud/digital_ocean/digital_ocean_domain.py | 30 +++++++++------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index 3b7a2dce236..d44c4d71134 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -29,12 +29,9 @@ options: - Indicate desired state of the target. default: present choices: ['present', 'absent'] - client_id: - description: - - DigitalOcean manager id. - api_key: + api_token: description: - - DigitalOcean api key. + - DigitalOcean api token. id: description: - Numeric, the droplet id you want to operate on. @@ -46,8 +43,8 @@ options: - The IP address to point a domain at. notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. - - Version 1 of DigitalOcean API is used. + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. + - Version 2 of DigitalOcean API is used. 
requirements: - "python >= 2.6" @@ -68,9 +65,9 @@ EXAMPLES = ''' - digital_ocean: > state=present name=test_droplet - size_id=1 - region_id=2 - image_id=3 + size_id=1gb + region_id=sgp1 + image_id=ubuntu-14-04-x64 register: test_droplet - digital_ocean_domain: > @@ -135,8 +132,8 @@ class Domain(JsonfyMixIn): return cls(json) @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) DomainRecord.manager = cls.manager @classmethod @@ -171,16 +168,14 @@ def core(module): return v try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] + api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] except KeyError, e: module.fail_json(msg='Unable to load %s' % e.message) changed = True state = module.params['state'] - Domain.setup(client_id, api_key) + Domain.setup(api_token) if state in ('present'): domain = Domain.find(id=module.params["id"]) @@ -223,8 +218,7 @@ def main(): module = AnsibleModule( argument_spec = dict( state = dict(choices=['present', 'absent'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), + api_token = dict(aliases=['API_TOKEN'], no_log=True), name = dict(type='str'), id = dict(aliases=['droplet_id'], type='int'), ip = dict(type='str'), From 9c30ef8926b869e6f5b16c1a484dfbcb1b1ccb3a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Nov 2015 23:48:02 -0800 Subject: [PATCH 39/75] doc updates - added version_added to new api_token - updated notes to explain API issues, option switch and versions affected. --- cloud/digital_ocean/digital_ocean.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index d7b55bee693..3b2d1a16484 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -37,6 +37,7 @@ options: api_token: description: - DigitalOcean api token. + version_added: "1.9.5" id: description: - Numeric, the droplet id you want to operate on. @@ -100,8 +101,9 @@ options: notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - - As of Ansible 2.0, Version 2 of the DigitalOcean API is used. - - As of Ansible 2.0, the above parameters were changed significantly. If you are running 1.9.x or earlier, please use C(ansible-doc digital_ocean) to view the correct parameters for your version. Dedicated web docs will be available in the near future for the stable branch. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory. 
requirements: - "python >= 2.6" - dopy From c428483b244123d3cc2d57ba1d7636d119ee5536 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Nov 2015 09:41:33 -0800 Subject: [PATCH 40/75] updated docs to denote retirement of v1 api and clarify when and why auth fields have changed --- cloud/digital_ocean/digital_ocean_domain.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index d44c4d71134..70d7e300df3 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -32,6 +32,7 @@ options: api_token: description: - DigitalOcean api token. + version_added: "1.9.5" id: description: - Numeric, the droplet id you want to operate on. @@ -44,7 +45,8 @@ options: notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - - Version 2 of DigitalOcean API is used. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. requirements: - "python >= 2.6" From cab97cd2d9ca0931e71d5eb17557e2a78b701cc5 Mon Sep 17 00:00:00 2001 From: Jay Rogers Date: Wed, 25 Nov 2015 18:46:17 -0600 Subject: [PATCH 41/75] Update in Amazon IAM Policy Documentation There were typos in the documentation that made the examples seem misleading of what was being demonstrated. This update fixes that. --- cloud/amazon/iam_policy.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 7038612d88e..44a708c9a66 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -64,9 +64,9 @@ extends_documentation_fragment: ''' EXAMPLES = ''' -# Create and policy with the name of 'Admin' to the group 'administrators' +# Create a policy with the name of 'Admin' to the group 'administrators' tasks: -- name: Create two new IAM users with API keys +- name: Assign a policy called Admin to the administrators group iam_policy: iam_type: group iam_name: administrators @@ -87,7 +87,7 @@ task: - Luigi register: new_groups -- name: +- name: Apply READ-ONLY policy to new groups that have been recently created iam_policy: iam_type: group iam_name: "{{ item.created_group.group_name }}" From 3d14397dc0a92f6db9e0b8a242ce7308f06ed5ee Mon Sep 17 00:00:00 2001 From: = Date: Thu, 26 Nov 2015 08:58:34 +0000 Subject: [PATCH 42/75] Fix for 13315 - minute now included in ansible_date_time on windows hosts --- windows/setup.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 4d163c7ec26..b31e76684b4 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -68,6 +68,7 @@ Set-Attr $date "year" (Get-Date -format yyyy) Set-Attr $date "month" (Get-Date -format MM) Set-Attr $date "day" (Get-Date -format dd) Set-Attr $date "hour" (Get-Date -format HH) +Set-Attr $date "minute" (Get-Date -format mm) Set-Attr $date "iso8601" (Get-Date -format s) Set-Attr $result.ansible_facts "ansible_date_time" $date From 32edc2f56f27ee5859c5657ec69e14b566b4da07 Mon Sep 17 00:00:00 2001 From: Markus Suonto Date: Thu, 26 Nov 2015 15:34:47 +0200 Subject: [PATCH 43/75] fixed quantum_ modules to work with minimum access rights if greater access rights are not needed --- 
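Note: the behavioural change in _set_tenant_id can be summarised with a minimal standalone sketch (hypothetical helper name, assuming a keystoneclient-style object; the real modules keep the result in the module-level _os_tenant_id variable):

    def resolve_tenant_id(keystone, tenant_name=None):
        # Without an explicit tenant_name, reuse the tenant id of the already
        # authenticated session instead of calling keystone.tenants.list(),
        # which typically requires admin access rights.
        if not tenant_name:
            return keystone.tenant_id
        # An explicit tenant_name still needs the (privileged) tenant listing.
        for tenant in keystone.tenants.list():
            if tenant.name == tenant_name:
                return tenant.id
        return None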
cloud/openstack/_quantum_network.py | 11 +++++------ cloud/openstack/_quantum_router.py | 15 +++++++-------- cloud/openstack/_quantum_router_interface.py | 13 ++++++------- cloud/openstack/_quantum_subnet.py | 12 ++++++------ 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/cloud/openstack/_quantum_network.py b/cloud/openstack/_quantum_network.py index 93b10880823..a0a29e6a062 100644 --- a/cloud/openstack/_quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -164,18 +164,17 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, diff --git a/cloud/openstack/_quantum_router.py b/cloud/openstack/_quantum_router.py index 252e1618d90..0c4d2063017 100644 --- a/cloud/openstack/_quantum_router.py +++ b/cloud/openstack/_quantum_router.py @@ -136,17 +136,16 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: - login_tenant_name = module.params['tenant_name'] + tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_router_id(module, neutron): kwargs = { diff --git a/cloud/openstack/_quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py index 4073c7d3b10..c936e98ad65 100644 --- a/cloud/openstack/_quantum_router_interface.py +++ b/cloud/openstack/_quantum_router_interface.py @@ -138,18 +138,17 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: - login_tenant_name = module.params['tenant_name'] + tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index 105ca32c582..f2f125f64c8 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -170,16 +170,16 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global 
_os_tenant_id if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_net_id(neutron, module): kwargs = { From 19abe233fed3cb04ec3344f549c48f8ea661aeab Mon Sep 17 00:00:00 2001 From: "Veaceslav (Slava) Mindru" Date: Thu, 26 Nov 2015 08:48:42 -0500 Subject: [PATCH 44/75] Squashed commit of the following: commit 406214fad214359fcf13fe8c7cd3f8f8faac5386 commit 85d1c9b0a41dd075eb2683b1a7de595ca3119614 commit 4aa5049b5ae25dee71a248238201611a466a13c4 commit 65a96974c80aea1fef88d78e218ecb665d8113e1 commit 22ea5863d1dfd628735b46cc7de51c0fd33251de Refactoring --- system/authorized_key.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/system/authorized_key.py b/system/authorized_key.py index 8a97722b222..55c1ec432ca 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -80,6 +80,15 @@ options: choices: [ "yes", "no" ] default: "no" version_added: "1.9" + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + set to C(no) used on personally controlled sites using self-signed + certificates. Prior to 2.0 the code defaulted to C(yes). + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.0" description: - "Adds or removes authorized keys for particular user accounts" author: "Ansible Core Team" @@ -111,6 +120,11 @@ EXAMPLES = ''' key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,from="10.0.1.1"' +# Using validate_certs: +- authorized_key: user=charlie + key=https://github.com/user.keys + validate_certs=no + # Set up authorized_keys exclusively with one key - authorized_key: user=root key="{{ item }}" state=present exclusive=yes @@ -358,6 +372,7 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) exclusive = params.get("exclusive", False) + validate_certs = params.get("validate_certs", True) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source @@ -460,6 +475,7 @@ def main(): key_options = dict(required=False, type='str'), unique = dict(default=False, type='bool'), exclusive = dict(default=False, type='bool'), + validate_certs = dict(default=True, type='bool'), ), supports_check_mode=True ) From 9325c0ae5fde9bb035e7f03017b8ce8bcdd635a5 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Thu, 26 Nov 2015 15:40:09 +0000 Subject: [PATCH 45/75] Update documentation for 'file' module to include 'diff_peek'. The 'diff_peek' option isn't documented at all, and provides a rudimentary check that the content isn't binary. Documentation is added to explain the option. The 'validate' option has a declaration, but isn't implemented. Therefore it may as well be removed from the module. 
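For context, a "looks like binary" peek of this kind is usually just a scan of the first block of the file for NUL bytes. The sketch below is illustrative only and is not taken from files/file.py:

    def appears_binary(path, blocksize=8192):
        # Rudimentary heuristic: treat the file as binary if the first block
        # contains a NUL byte; good enough for a diff pre-check, not a full
        # content-type detection.
        with open(path, 'rb') as f:
            return b'\0' in f.read(blocksize)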
--- files/file.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 8219990d1f6..9e79d4acc61 100644 --- a/files/file.py +++ b/files/file.py @@ -87,6 +87,12 @@ options: - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' + diff_peek: + required: false + description: + - "Only check whether the file looks like binary. Returns with the parameter + 'appears_binary' set to True or False depending on the initial content of the + file. This option is enabled when the option is set (to any value)." ''' EXAMPLES = ''' @@ -158,7 +164,6 @@ def main(): recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None), src = dict(required=False, default=None), ), add_file_common_args=True, From 660b47f62d0c528ab55b94f6f3aab70b6e863caf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Nov 2015 09:28:50 -0800 Subject: [PATCH 46/75] minor doc fixes and reformating updated validate_certs feature to be 2.1 --- system/authorized_key.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 55c1ec432ca..3c8fb5791e5 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -81,14 +81,14 @@ options: default: "no" version_added: "1.9" validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only - set to C(no) used on personally controlled sites using self-signed - certificates. Prior to 2.0 the code defaulted to C(yes). + description: + - This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. + - Prior to 2.1 the code worked as if this was set to C(yes). 
required: false default: "yes" choices: ["yes", "no"] - version_added: "2.0" + version_added: "2.1" description: - "Adds or removes authorized keys for particular user accounts" author: "Ansible Core Team" @@ -102,32 +102,30 @@ EXAMPLES = ''' - authorized_key: user=charlie key=https://github.com/charlie.keys # Using alternate directory locations: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - path='/etc/ssh/authorized_keys/charlie' - manage_dir=no +- authorized_key: + user: charlie + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + path: '/etc/ssh/authorized_keys/charlie' + manage_dir: no # Using with_file - name: Set up authorized_keys for the deploy user - authorized_key: user=deploy - key="{{ item }}" + authorized_key: user=deploy key="{{ item }}" with_file: - public_keys/doe-jane - public_keys/doe-john # Using key_options: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - key_options='no-port-forwarding,from="10.0.1.1"' +- authorized_key: + user: charlie + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + key_options: 'no-port-forwarding,from="10.0.1.1"' # Using validate_certs: -- authorized_key: user=charlie - key=https://github.com/user.keys - validate_certs=no +- authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no # Set up authorized_keys exclusively with one key -- authorized_key: user=root key="{{ item }}" state=present - exclusive=yes +- authorized_key: user=root key="{{ item }}" state=present exclusive=yes with_file: - public_keys/doe-jane ''' From 176b4103b60698a9327bb388d217200d7bbc4818 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Fri, 27 Nov 2015 20:49:27 +0000 Subject: [PATCH 47/75] Add documentation to 'file' AnsibleModule definition for internals. The parameters 'diff_peek' and 'validate' are not expected to be used by users. They are internal. To make it clear, this change adds the comments 'Internal use only' to each of those definitions to make it clear that they are actually used, just not by end-users. --- files/file.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/files/file.py b/files/file.py index 9e79d4acc61..cc94922fe35 100644 --- a/files/file.py +++ b/files/file.py @@ -87,12 +87,6 @@ options: - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' - diff_peek: - required: false - description: - - "Only check whether the file looks like binary. Returns with the parameter - 'appears_binary' set to True or False depending on the initial content of the - file. This option is enabled when the option is set (to any value)." 
''' EXAMPLES = ''' @@ -163,7 +157,8 @@ def main(): original_basename = dict(required=False), # Internal use only, for recursive ops recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), - diff_peek = dict(default=None), + diff_peek = dict(default=None), # Internal use only, for internal checks in the action plugins + validate = dict(required=False, default=None), # Internal use only, for template and copy src = dict(required=False, default=None), ), add_file_common_args=True, From ae582adce64bc0cf168118fe2f4c8f67d222d878 Mon Sep 17 00:00:00 2001 From: Sina Sadeghi Date: Sun, 29 Nov 2015 18:00:44 +1100 Subject: [PATCH 48/75] Update hostname.py Added support for FreeBSD. (http://www.freebsd.org) --- system/hostname.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index 0d4ca085b83..2d14b0893b7 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -399,6 +399,57 @@ class SolarisStrategy(GenericStrategy): # =========================================== +class FreeBSDStrategy(GenericStrategy): + """ + This is a FreeBSD hostname manipulation strategy class - it edits + the /etc/rc.conf.d/hostname file. + """ + + HOSTNAME_FILE = '/etc/rc.conf.d/hostname' + + def get_permanent_hostname(self): + + if not os.path.isfile(self.HOSTNAME_FILE): + try: + open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n") + except IOError, err: + self.module.fail_json(msg="failed to write file: %s" % + str(err)) + try: + try: + f = open(self.HOSTNAME_FILE, 'r') + for line in f: + line = line.strip() + if line.startswith('hostname='): + return line[10:].strip('"') + except Exception, err: + self.module.fail_json(msg="failed to read hostname: %s" % str(err)) + finally: + f.close() + + return None + + def set_permanent_hostname(self, name): + try: + try: + f = open(self.HOSTNAME_FILE, 'r') + lines = [x.strip() for x in f] + + for i, line in enumerate(lines): + if line.startswith('hostname='): + lines[i] = 'hostname="%s"' % name + break + f.close() + + f = open(self.HOSTNAME_FILE, 'w') + f.write('\n'.join(lines) + '\n') + except Exception, err: + self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + finally: + f.close() + +# =========================================== + class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' @@ -541,6 +592,12 @@ class SolarisHostname(Hostname): distribution = None strategy_class = SolarisStrategy +class FreeBSDHostname(Hostname): + platform = 'FreeBSD' + distribution = None + strategy_class = FreeBSDStrategy + + # =========================================== def main(): From dc697bf533da90d341abb99b2b035f9d79dc58a4 Mon Sep 17 00:00:00 2001 From: Veaceslav Mindru Date: Sun, 29 Nov 2015 20:51:22 +0100 Subject: [PATCH 49/75] adding validate_certs for YUM. #2582 --- packaging/os/yum.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index e0b598a410c..bed962e0158 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -117,6 +117,16 @@ options: choices: ["yes", "no"] aliases: [] + validate_certs: + description: + - This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. 
+ - Prior to 2.1 the code worked as if this was set to C(yes). + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.1" + notes: - When used with a loop of package names in a playbook, ansible optimizes the call to the yum module. Instead of calling the module with a single @@ -965,6 +975,7 @@ def main(): conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), update_cache=dict(required=False, default="no", type='bool'), + validate_certs=dict(required=False, defaults="yes", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), From c6fdd3809f8bc2a81142197db7fd1acd9f1f0305 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Nov 2015 07:23:29 -0800 Subject: [PATCH 50/75] fixed typo EEXISTS is actually EEXIST fixes #2585 --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index cc94922fe35..428565579b8 100644 --- a/files/file.py +++ b/files/file.py @@ -288,7 +288,7 @@ def main(): except OSError, ex: # Possibly something else created the dir since the os.path.exists # check above. As long as it's a dir, we don't need to error out. - if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + if not (ex.errno == errno.EEXIST and os.isdir(curpath)): raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath From bfa7cdb5c4ecde7e0f75a8ca2bc89dcf70207317 Mon Sep 17 00:00:00 2001 From: Dylan Martin Date: Mon, 30 Nov 2015 11:47:38 -0800 Subject: [PATCH 51/75] improved error message when no handler found --- files/unarchive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index ac35ea58d88..d5df63a8def 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -250,7 +250,7 @@ def pick_handler(src, dest, module): obj = handler(src, dest, module) if obj.can_handle_archive(): return obj - module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.') + module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src) def main(): From cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Nov 2015 19:02:28 -0800 Subject: [PATCH 52/75] Don't raise or catch StandardError in amazon modules --- cloud/amazon/ec2_asg.py | 58 +++++++++++------------- cloud/amazon/ec2_elb.py | 9 ++-- cloud/amazon/ec2_elb_lb.py | 5 +- cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_metric_alarm.py | 9 ++-- cloud/amazon/ec2_scaling_policy.py | 5 +- cloud/amazon/ec2_vol.py | 73 ++++++++++++++---------------- cloud/amazon/ec2_vpc_net.py | 49 ++++++++++---------- cloud/amazon/rds_param_group.py | 8 ++-- 9 files changed, 104 insertions(+), 114 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 39444c73c03..6564c4c26bb 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -152,9 +152,9 @@ EXAMPLES = ''' # Rolling ASG Updates -Below is an example of how to assign a new launch config to an ASG and terminate old instances. +Below is an example of how to assign a new launch config to an ASG and terminate old instances. 
-All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in +All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in a rolling fashion with instances using the current launch configuration, "my_new_lc". This could also be considered a rolling deploy of a pre-baked AMI. @@ -281,7 +281,6 @@ def get_properties(autoscaling_group): if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) - return properties def elb_dreg(asg_connection, module, group_name, instance_id): @@ -298,7 +297,6 @@ def elb_dreg(asg_connection, module, group_name, instance_id): else: return - exists = True for lb in as_group.load_balancers: elb_connection.deregister_instances(lb, instance_id) log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) @@ -315,10 +313,8 @@ def elb_dreg(asg_connection, module, group_name, instance_id): time.sleep(10) if wait_timeout <= time.time(): - # waiting took too long + # waiting took too long module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime())) - - def elb_healthy(asg_connection, elb_connection, module, group_name): @@ -337,7 +333,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name): # but has not yet show up in the ELB try: lb_instances = elb_connection.describe_instance_health(lb, instances=instances) - except boto.exception.InvalidInstance, e: + except boto.exception.InvalidInstance: pass for i in lb_instances: if i.state == "InService": @@ -346,7 +342,6 @@ def elb_healthy(asg_connection, elb_connection, module, group_name): return len(healthy_instances) - def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) wait_timeout = module.params.get('wait_timeout') @@ -370,7 +365,7 @@ def wait_for_elb(asg_connection, module, group_name): log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): - # waiting took too long + # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) @@ -396,7 +391,7 @@ def create_autoscaling_group(connection, module): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) @@ -433,7 +428,7 @@ def create_autoscaling_group(connection, module): try: connection.create_auto_scaling_group(ag) - if wait_for_instances == True: + if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] @@ -475,7 +470,7 @@ def create_autoscaling_group(connection, module): dead_tags = [] for tag in as_group.tags: have_tags[tag.key] = [tag.value, tag.propagate_at_launch] - if not tag.key in want_tags: + if tag.key not in want_tags: changed = True dead_tags.append(tag) @@ -492,14 +487,13 @@ def create_autoscaling_group(connection, module): changed = True as_group.load_balancers = module.params.get('load_balancers') - if changed: try: as_group.update() except BotoServerError, e: module.fail_json(msg=str(e)) - if wait_for_instances == True: + if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: @@ -525,7 +519,7 @@ def delete_autoscaling_group(connection, module): if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.instances: - instances = False + instances = False time.sleep(10) group.delete() @@ -580,15 +574,15 @@ def replace(connection, module): changed = True return(changed, props) - # we don't want to spin up extra instances if not necessary + # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: - log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) - batch_size = num_new_inst_needed + log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) + batch_size = num_new_inst_needed if not old_instances: changed = False return(changed, props) - + #check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group.min_size @@ -637,7 +631,7 @@ def get_instances_by_lc(props, lc_check, initial_instances): new_instances.append(i) else: old_instances.append(i) - + else: log.debug("Comparing initial instances with current: {0}".format(initial_instances)) for i in props['instances']: @@ -659,10 +653,10 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc # and they have a non-current launch config if lc_check: for i in instances: - if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: + if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: instances_to_terminate.append(i) else: - for i in instances: + for i in instances: if i in initial_instances: instances_to_terminate.append(i) return instances_to_terminate @@ -676,7 +670,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le lc_check = module.params.get('lc_check') decrement_capacity = False break_loop = False - + as_group = connection.get_all_groups(names=[group_name])[0] props = 
get_properties(as_group) desired_size = as_group.min_size @@ -720,7 +714,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le elb_dreg(connection, module, group_name, instance_id) log.debug("terminating instance: {0}".format(instance_id)) connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity) - + # we wait to make sure the machines we marked as Unhealthy are # no longer in the list @@ -756,7 +750,7 @@ def wait_for_term_inst(connection, module, term_instances): # waiting took too long module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime()) - + def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. @@ -802,9 +796,9 @@ def main(): termination_policies=dict(type='list', default='Default') ), ) - + module = AnsibleModule( - argument_spec=argument_spec, + argument_spec=argument_spec, mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) @@ -826,13 +820,13 @@ def main(): if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) elif state == 'absent': - changed = delete_autoscaling_group(connection, module) - module.exit_json( changed = changed ) + changed = delete_autoscaling_group(connection, module) + module.exit_json( changed = changed ) if replace_all_instances or replace_instances: replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) - -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 4e19a054bd1..5b5569ce00d 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -257,7 +257,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) elbs = [] @@ -290,7 +290,7 @@ class ElbManager: try: asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) @@ -314,7 +314,7 @@ class ElbManager: """Returns a boto.ec2.InstanceObject for self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] @@ -374,4 +374,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 1d9b2db283e..96ef6b22a99 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -492,7 +492,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): @@ -981,4 +981,5 @@ def 
main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 41b7effa502..802b9d05a0b 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -311,7 +311,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 94f303212ae..8ae7195f2e1 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -115,8 +115,6 @@ EXAMPLES = ''' ''' -import sys - try: import boto.ec2.cloudwatch from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm @@ -270,11 +268,11 @@ def main(): state = module.params.get('state') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -288,4 +286,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 220fa325582..3c412232e21 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -178,7 +178,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg = str(e)) if state == 'present': @@ -187,4 +187,5 @@ def main(): delete_scaling_policy(connection, module) -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index aba121d8dd9..62e36a74ced 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -47,7 +47,7 @@ options: volume_type: description: - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default - and continues to remain the Ansible default for backwards compatibility. + and continues to remain the Ansible default for backwards compatibility. required: false default: standard version_added: "1.9" @@ -69,7 +69,7 @@ options: default: null zone: description: - - zone in which to create the volume, if unset uses the zone the instance is in (if set) + - zone in which to create the volume, if unset uses the zone the instance is in (if set) required: false default: null aliases: ['aws_zone', 'ec2_zone'] @@ -87,7 +87,7 @@ options: choices: ["yes", "no"] version_added: "1.5" state: - description: + description: - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). 
required: false default: present @@ -101,15 +101,15 @@ extends_documentation_fragment: EXAMPLES = ''' # Simple attachment action -- ec2_vol: - instance: XXXXXX - volume_size: 5 +- ec2_vol: + instance: XXXXXX + volume_size: 5 device_name: sdd -# Example using custom iops params +# Example using custom iops params - ec2_vol: - instance: XXXXXX - volume_size: 5 + instance: XXXXXX + volume_size: 5 iops: 100 device_name: sdd @@ -118,15 +118,15 @@ EXAMPLES = ''' instance: XXXXXX snapshot: "{{ snapshot }}" -# Playbook example combined with instance launch +# Playbook example combined with instance launch - ec2: keypair: "{{ keypair }}" image: "{{ image }}" - wait: yes + wait: yes count: 3 register: ec2 - ec2_vol: - instance: "{{ item.id }} " + instance: "{{ item.id }} " volume_size: 5 with_items: ec2.instances register: ec2_vol @@ -223,7 +223,7 @@ def get_volume(module, ec2): return vols[0] def get_volumes(module, ec2): - + instance = module.params.get('instance') try: @@ -254,12 +254,10 @@ def boto_supports_volume_encryption(): """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') - + def create_volume(module, ec2, zone): changed = False name = module.params.get('name') - id = module.params.get('id') - instance = module.params.get('instance') iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = module.params.get('volume_size') @@ -292,16 +290,16 @@ def create_volume(module, ec2, zone): def attach_volume(module, ec2, volume, instance): - + device_name = module.params.get('device_name') changed = False - + # If device_name isn't set, make a choice based on best practices here: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html - + # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) - + # Use password data attribute to tell whether the instance is Windows or Linux if device_name is None: try: @@ -311,7 +309,7 @@ def attach_volume(module, ec2, volume, instance): device_name = '/dev/xvdf' except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - + if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance.id: @@ -330,9 +328,9 @@ def attach_volume(module, ec2, volume, instance): return volume, changed def detach_volume(module, ec2, volume): - + changed = False - + if volume.attachment_state() is not None: adata = volume.attach_data volume.detach() @@ -340,15 +338,15 @@ def detach_volume(module, ec2, volume): time.sleep(3) volume.update() changed = True - + return volume, changed - + def get_volume_info(volume, state): - + # If we're just listing volumes then do nothing, else get the latest update for the volume if state != 'list': volume.update() - + volume_info = {} attachment = volume.attach_data @@ -369,7 +367,7 @@ def get_volume_info(volume, state): }, 'tags': volume.tags } - + return volume_info def main(): @@ -397,34 +395,32 @@ def main(): name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') - volume_type = module.params.get('volume_type') - iops = module.params.get('iops') encrypted = module.params.get('encrypted') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') - + # Ensure we have the zone or can get the zone if instance is None and zone is None and state == 'present': module.fail_json(msg="You must specify either instance or zone") - + # Set volume detach flag if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False - + # Set changed flag changed = False region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -471,11 +467,11 @@ def main(): if volume_size and (id or snapshot): module.fail_json(msg="Cannot specify volume_size together with id or snapshot") - + if state == 'present': volume, changed = create_volume(module, ec2, zone) if detach_vol_flag: - volume, changed = detach_volume(module, ec2, volume) + volume, changed = detach_volume(module, ec2, volume) elif inst is not None: volume, changed = attach_volume(module, ec2, volume, inst) @@ -489,4 +485,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 51acbcaae37..23ce175b92b 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -93,9 +93,6 @@ EXAMPLES = ''' ''' -import time -import sys - try: import boto import boto.ec2 @@ -136,15 +133,15 @@ def vpc_exists(module, vpc, name, cidr_block, multi): module.fail_json(msg='Currently there are %d VPCs that have the same name and ' 'CIDR block you specified. If you would like to create ' 'the VPC anyway please pass True to the multi_ok param.' 
% len(matching_vpcs)) - + return matched_vpc def update_vpc_tags(vpc, module, vpc_obj, tags, name): - + if tags is None: tags = dict() - + tags.update({'Name': name}) try: current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) @@ -156,10 +153,10 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): except Exception, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): - + if vpc_obj.dhcp_options_id != dhcp_id: connection.associate_dhcp_options(dhcp_id, vpc_obj.id) return True @@ -211,48 +208,47 @@ def main(): tags=module.params.get('tags') state=module.params.get('state') multi=module.params.get('multi_ok') - + changed=False region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") - + if dns_hostnames and not dns_support: module.fail_json('In order to enable DNS Hostnames you must also enable DNS support') if state == 'present': - + # Check if VPC exists vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) - + if vpc_obj is None: try: vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) changed = True except BotoServerError, e: module.fail_json(msg=e) - - if dhcp_id is not None: + + if dhcp_id is not None: try: if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): changed = True except BotoServerError, e: module.fail_json(msg=e) - - if tags is not None or name is not None: + + if tags is not None or name is not None: try: if update_vpc_tags(connection, module, vpc_obj, tags, name): changed = True except BotoServerError, e: module.fail_json(msg=e) - # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute # which is needed in order to detect the current status of DNS options. For now we just update @@ -263,21 +259,21 @@ def main(): except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + # get the vpc obj again in case it has changed try: vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) elif state == 'absent': - + # Check if VPC exists vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) - + if vpc_obj is not None: try: connection.delete_vpc(vpc_obj.id) @@ -287,11 +283,12 @@ def main(): e_msg = boto_exception(e) module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " "and/or ec2_vpc_route_table modules to ensure the other components are absent." 
% e_msg) - + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) - + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index b34e3090b53..fab333f0351 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -112,7 +112,7 @@ except ImportError: # returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group) -class NotModifiableError(StandardError): +class NotModifiableError(Exception): def __init__(self, error_message, *args): super(NotModifiableError, self).__init__(error_message, *args) self.error_message = error_message @@ -175,7 +175,7 @@ def modify_group(group, params, immediate=False): new_params = dict(params) for key in new_params.keys(): - if group.has_key(key): + if key in group: param = group[key] new_value = new_params[key] @@ -281,7 +281,6 @@ def main(): else: break - except BotoServerError, e: module.fail_json(msg = e.error_message) @@ -297,4 +296,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() From 61a3625ed0cf924762b7bce0fec0f92c312acc7b Mon Sep 17 00:00:00 2001 From: krdlab Date: Wed, 2 Dec 2015 18:20:20 +0900 Subject: [PATCH 53/75] Fix 'stat' module document --- files/stat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/stat.py b/files/stat.py index 852ddd5afd2..1e41185ad6a 100644 --- a/files/stat.py +++ b/files/stat.py @@ -111,7 +111,7 @@ stat: path: description: The full path of the file/object to get the facts of returned: success and if path exists - type: boolean + type: string sample: '/path/to/file' mode: description: Unix permissions of the file in octal From 34f7d7b06828d3cf22781dcc7907ecb5a856e2bf Mon Sep 17 00:00:00 2001 From: cspollar Date: Wed, 2 Dec 2015 14:01:16 -0600 Subject: [PATCH 54/75] Fixed typo in uri module example --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 5c0907523b8..73b2f059f7e 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -159,7 +159,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: "'illustrative' not in webpage.content" + when: "'AWESOME' not in webpage.content" # Create a JIRA issue From 292a83cba7693143f39b92c64cb5493423c81bee Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 14:30:28 -0800 Subject: [PATCH 55/75] corrected version_added for new temp_dest feature --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 1e3eb93a71d..b5a40c729a0 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -62,7 +62,7 @@ options: - https://docs.python.org/2/library/tempfile.html#tempfile.tempdir required: false default: '' - version_added: '2.0' + version_added: '2.1' force: description: - If C(yes) and C(dest) is not a directory, will download the file every From 9c74272c9b8ec573bad16312688697e2329d547d Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Wed, 2 Dec 2015 20:31:27 -0800 Subject: [PATCH 56/75] Fix #2475 ini_file module: bracklets in key break idempotence Escape the regex special characters in the option name. 
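To illustrate the bug being fixed (standalone sketch with a made-up option name, not code from the module): without escaping, regex metacharacters in the option name change the meaning of the match pattern, so an entry that is already present is never detected and the task stops being idempotent.

    import re

    line = 'mylist[0] = foo\n'
    option = 'mylist[0]'

    # Unescaped: '[0]' is parsed as a character class matching just '0', so the
    # existing line is not recognised and ini_file rewrites it on every run.
    print(re.match('%s *=' % option, line))             # None
    # Escaped, as the patch now does: the brackets are matched literally.
    print(re.match('%s *=' % re.escape(option), line))  # match object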
--- files/ini_file.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index 82d4621dfbb..2dd021ad27c 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -99,6 +99,22 @@ import ConfigParser import sys import os +# ============================================================== +# match_opt + +def match_opt(option, line): + option = re.escape(option) + return re.match('%s *=' % option, line) \ + or re.match('# *%s *=' % option, line) \ + or re.match('; *%s *=' % option, line) + +# ============================================================== +# match_active_opt + +def match_active_opt(option, line): + option = re.escape(option) + return re.match('%s *=' % option, line) + # ============================================================== # do_ini @@ -141,9 +157,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if within_section and option: if state == 'present': # change the existing option line - if re.match('%s *=' % option, line) \ - or re.match('# *%s *=' % option, line) \ - or re.match('; *%s *=' % option, line): + if match_opt(option, line): newline = '%s = %s\n' % (option, value) changed = ini_lines[index] != newline ini_lines[index] = newline @@ -154,14 +168,14 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese line = ini_lines[index] if line.startswith('['): break - if re.match('%s *=' % option, line): + if match_active_opt(option, line): del ini_lines[index] else: index = index + 1 break else: # comment out the existing option line - if re.match('%s *=' % option, line): + if match_active_opt(option, line): ini_lines[index] = '#%s' % ini_lines[index] changed = True break From bfcdb0559734fd740f2aa422cce3bab28887f0bb Mon Sep 17 00:00:00 2001 From: Arthur Clement Date: Thu, 3 Dec 2015 22:48:13 +0100 Subject: [PATCH 57/75] Example of single instance with ssd gp2 root volume creation --- cloud/amazon/ec2.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index e035e07af24..9da62f616ea 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -309,6 +309,22 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Single instance with ssd gp2 root volume +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/xvda + volume_type: gp2 + volume_size: 8 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + exact_count: 1 + # Multiple groups example - ec2: key_name: mykey From 191347676eea08817da3fb237f24cdbf2d16e307 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 09:18:45 -0800 Subject: [PATCH 58/75] When the password file does not exist and we're making sure the user isn't in the password file, change error into a warning Warning catches typos in the filename. Since the playbook is saying "make sure this user doesn't have an entry" it makes more sense to warn than to error. 
Fixes #2619 --- web_infrastructure/htpasswd.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4253f1572ac..83a6445374b 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -97,6 +97,7 @@ else: apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + def create_missing_directories(dest): destpath = os.path.dirname(dest) if not os.path.exists(destpath): @@ -155,9 +156,6 @@ def absent(dest, username, check_mode): """ Ensures user is absent Returns (msg, changed) """ - if not os.path.exists(dest): - raise ValueError("%s does not exists" % dest) - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): ht = HtpasswdFile(dest, new=False) else: @@ -244,6 +242,9 @@ def main(): if state == 'present': (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) elif state == 'absent': + if not os.path.exists(path): + module.exit_json(msg="%s not present" % username, + warnings="%s does not exist" % path, changed=False) (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) From 0d5380258e5c73a5f43764b55fda2a7dc26545d2 Mon Sep 17 00:00:00 2001 From: Veaceslav Mindru Date: Sun, 6 Dec 2015 20:54:05 +0100 Subject: [PATCH 59/75] fix typo s/defaults/default --- packaging/os/yum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index bed962e0158..4c1b1931c41 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -975,7 +975,7 @@ def main(): conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), update_cache=dict(required=False, default="no", type='bool'), - validate_certs=dict(required=False, defaults="yes", type='bool'), + validate_certs=dict(required=False, default="yes", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), From 5599bfb07da40e72bcc1a81503ca877d82243016 Mon Sep 17 00:00:00 2001 From: Mark Theunissen Date: Tue, 8 Dec 2015 19:09:50 +0200 Subject: [PATCH 60/75] Remove Mark Theunissen as maintainer --- database/mysql/mysql_db.py | 8 ++++---- database/mysql/mysql_user.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index df70e0f7e51..5942fe2c3b4 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -30,7 +30,7 @@ options: name: description: - name of the database to add or remove - - name=all May only be provided if I(state) is C(dump) or C(import). + - name=all May only be provided if I(state) is C(dump) or C(import). - if name=all Works like --all-databases option for mysqldump (Added in 2.0) required: true default: null @@ -90,7 +90,7 @@ notes: the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. 
requirements: [ ConfigParser ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = ''' @@ -367,7 +367,7 @@ def main(): except Exception, e: module.fail_json(msg="error deleting database: " + str(e)) elif state == "dump": - rc, stdout, stderr = db_dump(module, login_host, login_user, + rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, all_databases, port=login_port, socket=module.params['login_unix_socket']) @@ -376,7 +376,7 @@ def main(): else: module.exit_json(changed=True, db=db, msg=stdout) elif state == "import": - rc, stdout, stderr = db_import(module, login_host, login_user, + rc, stdout, stderr = db_import(module, login_host, login_user, login_password, db, target, all_databases, port=login_port, socket=module.params['login_unix_socket']) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 3ac7c0890cd..3bc84d28ffd 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -120,7 +120,7 @@ notes: the file." requirements: [ "MySQLdb" ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = """ @@ -139,7 +139,7 @@ EXAMPLES = """ # Specify grants composed of more than one word - mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present -# Revoke all privileges for user 'bob' and password '12345' +# Revoke all privileges for user 'bob' and password '12345' - mysql_user: name=bob password=12345 priv=*.*:USAGE state=present # Example privileges string format From c93de2f930e4d9d4f6f9f4825fa1a7007a9d80dc Mon Sep 17 00:00:00 2001 From: quoing Date: Tue, 8 Dec 2015 16:31:25 +0100 Subject: [PATCH 61/75] Add "default" entry option back (removed in e95bcae), update will translate entry to standard parameters so compatibility with BDS is kept --- files/acl.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/files/acl.py b/files/acl.py index ad0f4607609..91687f05eb5 100644 --- a/files/acl.py +++ b/files/acl.py @@ -127,23 +127,29 @@ def split_entry(entry): ''' splits entry and ensures normalized return''' a = entry.split(':') + + d = None + if entry.lower().startswith("d"): + d = True + a.pop(0) + if len(a) == 2: a.append(None) t, e, p = a - if t.startswith("u"): + if t.lower().startswith("u"): t = "user" - elif t.startswith("g"): + elif t.lower().startswith("g"): t = "group" - elif t.startswith("m"): + elif t.lower().startswith("m"): t = "mask" - elif t.startswith("o"): + elif t.lower().startswith("o"): t = "other" else: t = None - return [t, e, p] + return [d, t, e, p] def build_entry(etype, entity, permissions=None): @@ -269,16 +275,18 @@ def main(): if etype or entity or permissions: module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.") - if state == 'present' and entry.count(":") != 2: - module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.") + if state == 'present' and not entry.count(":") in [2, 3]: + module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.") - if state == 'absent' and entry.count(":") != 1: - module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") + if state == 'absent' and not entry.count(":") in [1, 2]: + module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.") if state == 'query': module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.") - etype, 
entity, permissions = split_entry(entry) + default_flag, etype, entity, permissions = split_entry(entry) + if default_flag != None: + default = default_flag changed = False msg = "" From db66144386d8a27b458ddf4d86b81588f4ddd021 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:38:03 -0800 Subject: [PATCH 62/75] simplified lowercasing --- files/acl.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/files/acl.py b/files/acl.py index 91687f05eb5..8d0807e7430 100644 --- a/files/acl.py +++ b/files/acl.py @@ -137,14 +137,15 @@ def split_entry(entry): a.append(None) t, e, p = a + t = t.lower() - if t.lower().startswith("u"): + if t.startswith("u"): t = "user" - elif t.lower().startswith("g"): + elif t.startswith("g"): t = "group" - elif t.lower().startswith("m"): + elif t.startswith("m"): t = "mask" - elif t.lower().startswith("o"): + elif t.startswith("o"): t = "other" else: t = None From 877daf970d37cb7716ac6bb9351a579548fe54aa Mon Sep 17 00:00:00 2001 From: quoing Date: Tue, 8 Dec 2015 13:04:21 +0100 Subject: [PATCH 63/75] Fix: Default ACL parameters are not correctly handled --- files/acl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 8d0807e7430..a3997cd495d 100644 --- a/files/acl.py +++ b/files/acl.py @@ -183,9 +183,9 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): if default: if(mode == 'rm'): - cmd.append('-k') + cmd.insert(1, '-k') else: # mode == 'set' or mode == 'get' - cmd.append('-d') + cmd.insert(1, '-d') cmd.append(path) return cmd From 6128845b696b41d90862f6255b9a0e08557101c3 Mon Sep 17 00:00:00 2001 From: Dominique Barton Date: Tue, 24 Nov 2015 11:40:03 +0100 Subject: [PATCH 64/75] bugfix for issue #2537 --- system/user.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index c04b748f068..e43173dac4f 100755 --- a/system/user.py +++ b/system/user.py @@ -1684,7 +1684,8 @@ class DarwinUser(User): out = '' err = '' - self._make_group_numerical() + if self.group: + self._make_group_numerical() for field in self.fields: if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: From f2b72e62c0f8c83857bd8fd7396395295a08cb9b Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Tue, 8 Dec 2015 16:16:23 -0500 Subject: [PATCH 65/75] fixed disappearing groups on OSX user module Ensure that we don't try to modify the groups collection if groups are not specified --- system/user.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/system/user.py b/system/user.py index e43173dac4f..397abfbc253 100755 --- a/system/user.py +++ b/system/user.py @@ -1674,9 +1674,10 @@ class DarwinUser(User): self._update_system_user() # here we don't care about change status since it is a creation, # thus changed is always true. 
-        (rc, _out, _err, changed) = self._modify_group()
-        out += _out
-        err += _err
+        if self.groups:
+            (rc, _out, _err, changed) = self._modify_group()
+            out += _out
+            err += _err
         return (rc, err, out)
 
     def modify_user(self):
@@ -1708,12 +1709,13 @@ class DarwinUser(User):
                 err += _err
                 changed = rc
 
-        (rc, _out, _err, _changed) = self._modify_group()
-        out += _out
-        err += _err
+        if self.groups:
+            (rc, _out, _err, _changed) = self._modify_group()
+            out += _out
+            err += _err
 
-        if _changed is True:
-            changed = rc
+            if _changed is True:
+                changed = rc
 
         rc = self._update_system_user()
         if rc == 0:

From a54d1fe09c3bc15a3bc03c53eb2082f8bad34a7e Mon Sep 17 00:00:00 2001
From: Daniel Kimsey
Date: Wed, 9 Dec 2015 11:59:16 -0600
Subject: [PATCH 66/75] Fix yum module failing to initialize yum plugins

---
 packaging/os/yum.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packaging/os/yum.py b/packaging/os/yum.py
index bed962e0158..783794690f0 100644
--- a/packaging/os/yum.py
+++ b/packaging/os/yum.py
@@ -195,6 +195,7 @@ def yum_base(conf_file=None):
     my = yum.YumBase()
     my.preconf.debuglevel=0
     my.preconf.errorlevel=0
+    my.preconf.plugins = True
     if conf_file and os.path.exists(conf_file):
         my.preconf.fn = conf_file
     if os.geteuid() != 0:

From 8ccfdb874e8db922996d67237d11bdd38f354f9c Mon Sep 17 00:00:00 2001
From: Michael Scherer
Date: Thu, 10 Dec 2015 00:00:19 +0100
Subject: [PATCH 67/75] Remove an unneeded use of use_unsafe_shell

Since use_unsafe_shell is suspicious from a security point of view (or
it wouldn't be unsafe), the less we have, the less code we have to
thoroughly inspect for a security audit.
---
 system/hostname.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/system/hostname.py b/system/hostname.py
index 2d14b0893b7..1b577367c3e 100644
--- a/system/hostname.py
+++ b/system/hostname.py
@@ -260,8 +260,8 @@ class SystemdStrategy(GenericStrategy):
                                   (rc, out, err))
 
     def get_permanent_hostname(self):
-        cmd = 'hostnamectl --static status'
-        rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+        cmd = ['hostnamectl', '--static', 'status']
+        rc, out, err = self.module.run_command(cmd)
         if rc != 0:
             self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                   (rc, out, err))

From 27f561dca2b12db4a61db76d0b97f22b4c2a4acd Mon Sep 17 00:00:00 2001
From: Adam Fields
Date: Thu, 10 Dec 2015 12:45:59 -0500
Subject: [PATCH 68/75] added a reference to the template module for clarity

---
 files/copy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/files/copy.py b/files/copy.py
index 5dd1e9935e6..122d9808472 100644
--- a/files/copy.py
+++ b/files/copy.py
@@ -27,7 +27,7 @@ module: copy
 version_added: "historical"
 short_description: Copies files to remote locations.
 description:
-    - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
+    - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. If you need variable interpolation in copied files, use the M(template) module.
 options:
   src:
     description:

From b9fe8166fdcd24691ceac8503ee37ec826c18c00 Mon Sep 17 00:00:00 2001
From: "Timothy R. Chavez"
Date: Wed, 9 Dec 2015 19:11:10 -0600
Subject: [PATCH 69/75] Get new server object after adding floating IP

We need a new server object once we add the floating IP, otherwise we
will be operating with the older server object pre-floating-IP
assignment.
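For illustration, a minimal sketch of the difference (assuming the
shade-style cloud client this module already uses; get_server is only a
stand-in for however the server was first looked up):

    # Stale: the floating IP gets attached, but `server` still describes the
    # instance as it was before the assignment, so the follow-up lookup can
    # miss the new address.
    server = cloud.get_server(server_name_or_id)
    cloud.add_ips_to_server(server=server, ips=floating_ip_address)
    ip = cloud.get_server_public_ip(server)    # may be empty or stale

    # Fixed: keep the refreshed server object the call returns.
    server = cloud.add_ips_to_server(server=server, ips=floating_ip_address)
    ip = cloud.get_server_public_ip(server)    # sees the newly attached IP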
--- cloud/openstack/os_floating_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 957e3057375..b6342f8fa01 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -154,7 +154,7 @@ def main(): msg="server {0} not found".format(server_name_or_id)) if state == 'present': - cloud.add_ips_to_server( + server = cloud.add_ips_to_server( server=server, ips=floating_ip_address, reuse=reuse, fixed_address=fixed_address, wait=wait, timeout=timeout) fip_address = cloud.get_server_public_ip(server) From c007cd7f9abf8b80a62d627623125912e7256557 Mon Sep 17 00:00:00 2001 From: trevoro Date: Thu, 2 Oct 2014 10:05:35 -0700 Subject: [PATCH 70/75] adding password_hash support to mysql_user module fixing user_add arguments error fixing user_mod arguments error --- database/mysql/mysql_user.py | 72 +++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 26 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 3bc84d28ffd..a4f7635e5bc 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -35,6 +35,11 @@ options: - set the user's password. (Required when adding a user) required: false default: null + password_hash: + description: + - set the user's password hash (used in place of plain text password) + required: false + default: null host: description: - the 'host' part of the MySQL username @@ -158,6 +163,7 @@ password=n<_665{vS43y import getpass import tempfile import re +import string try: import MySQLdb except ImportError: @@ -211,26 +217,48 @@ def user_exists(cursor, user, host): count = cursor.fetchone() return count[0] > 0 -def user_add(cursor, user, host, password, new_priv): - cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) +def user_add(cursor, user, host, password, password_hash, new_priv): + if password and not password_hash: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) + elif password_hash: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password_hash)) if new_priv is not None: for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True -def user_mod(cursor, user, host, password, new_priv, append_privs): +def is_hash(password): + ishash = False + if len(password) is 41 and password[0] is '*': + ishash = True + for i in password[1:]: + if i not in string.hexdigits: + ishash = False + break + return ishash + +def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs): changed = False grant_option = False - # Handle passwords - if password is not None: + # Handle passwords. 
+ if password is not None or password_hash is not None: cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True + + if password: + cursor.execute("SELECT PASSWORD(%s)", (password,)) + new_pass_hash = cursor.fetchone() + if current_pass_hash[0] != new_pass_hash[0]: + cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) + changed = True + elif password_hash: + if is_hash(password_hash): + if current_pass_hash[0] != password_hash: + cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password_hash)) + changed = True + else: + module.fail_json(msg="password_hash was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") # Handle privileges if new_priv is not None: @@ -387,7 +415,8 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None, no_log=True), + password=dict(default=None), + password_hash=dict(default=None), host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), @@ -401,7 +430,8 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - host = module.params["host"].lower() + password_hash = module.params["password_hash"] + host = module.params["host"] state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] @@ -434,21 +464,11 @@ def main(): if state == "present": if user_exists(cursor, user, host): - try: - if update_password == 'always': - changed = user_mod(cursor, user, host, password, priv, append_privs) - else: - changed = user_mod(cursor, user, host, None, priv, append_privs) - - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: - module.fail_json(msg=str(e)) + changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) else: - if password is None: - module.fail_json(msg="password parameter required when adding a user") - try: - changed = user_add(cursor, user, host, password, priv) - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: - module.fail_json(msg=str(e)) + if password is None and password_hash is None: + module.fail_json(msg="password or password_hash parameter required when adding a user") + changed = user_add(cursor, user, host, password, password_hash, priv) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) From aba519868f9896d76a5a0a07c9266b94ba4cb6b4 Mon Sep 17 00:00:00 2001 From: Derek Smith Date: Tue, 23 Jun 2015 15:57:18 -0500 Subject: [PATCH 71/75] updated examples added mysql 5.7 user password modification support with backwards compatibility resolved mysql server version check and differences in user authentication management explicitly state support for mysql_native_password type and no others. fixed some failing logic and updated samples updated comment to actually match logic. 
simplified conditionals and a little refactor --- database/mysql/mysql_user.py | 102 +++++++++++++++++++++++------------ 1 file changed, 68 insertions(+), 34 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index a4f7635e5bc..766eadb10f0 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -37,9 +37,11 @@ options: default: null password_hash: description: - - set the user's password hash (used in place of plain text password) + - Indicate that the 'password' field is a `mysql_native_password` hash required: false - default: null + choices: [ "yes", "no" ] + default: "no" + version_added: "2.0" host: description: - the 'host' part of the MySQL username @@ -123,6 +125,7 @@ notes: without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from the file." + - Currently, there is only support for the `mysql_native_password` encryted password hash module. requirements: [ "MySQLdb" ] author: "Ansible Core Team" @@ -132,6 +135,9 @@ EXAMPLES = """ # Create database user with name 'bob' and password '12345' with all database privileges - mysql_user: name=bob password=12345 priv=*.*:ALL state=present +# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges +- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present + # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present @@ -212,53 +218,78 @@ def connect(module, login_user=None, login_password=None, config_file=''): db_connection = MySQLdb.connect(**config) return db_connection.cursor() +# User Authentication Management was change in MySQL 5.7 +# This is a generic check for if the server version is less than version 5.7 +def server_version_check(cursor): + cursor.execute("SELECT VERSION()"); + result = cursor.fetchone() + version_str = result[0] + version = version_str.split('.') + + if (int(version[0]) <= 5 and int(version[1]) < 7): + return True + else: + return False + def user_exists(cursor, user, host): cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) count = cursor.fetchone() return count[0] > 0 -def user_add(cursor, user, host, password, password_hash, new_priv): - if password and not password_hash: +def user_add(cursor, user, host, password, encrypted, new_priv): + if password and encrypted: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password)) + elif password and not encrypted: cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) - elif password_hash: - cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password_hash)) if new_priv is not None: for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True - + def is_hash(password): ishash = False - if len(password) is 41 and password[0] is '*': - ishash = True - for i in password[1:]: - if i not in string.hexdigits: - ishash = False - break + if len(password) == 41 and password[0] == '*': + if frozenset(password[1:]).issubset(string.hexdigits): + ishash = True return ishash def user_mod(cursor, user, host, password, password_hash, new_priv, 
append_privs): changed = False grant_option = False + + # Handle clear text and hashed passwords. + if bool(password): + # Determine what user management method server uses + old_user_mgmt = server_version_check(cursor) - # Handle passwords. - if password is not None or password_hash is not None: - cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) + if old_user_mgmt: + cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) + else: + cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() - if password: - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True - elif password_hash: - if is_hash(password_hash): - if current_pass_hash[0] != password_hash: - cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password_hash)) + if encrypted: + if is_hash(password): + if current_pass_hash[0] != encrypted: + if old_user_mgmt: + cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password)) + else: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, password)) changed = True else: - module.fail_json(msg="password_hash was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") + module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") + else: + if old_user_mgmt: + cursor.execute("SELECT PASSWORD(%s)", (password,)) + else: + cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,)) + new_pass_hash = cursor.fetchone() + if current_pass_hash[0] != new_pass_hash[0]: + if old_user_mgmt: + cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password)) + else: + cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) + changed = True # Handle privileges if new_priv is not None: @@ -415,8 +446,8 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None), - password_hash=dict(default=None), + password=dict(default=None, no_log=True), + encrypted=dict(default=False, type='bool'), host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), @@ -430,8 +461,8 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - password_hash = module.params["password_hash"] - host = module.params["host"] + encrypted = module.boolean(module.params["encrypted"]) + host = module.params["host"].lower() state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] @@ -466,9 +497,12 @@ def main(): if user_exists(cursor, user, host): changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) else: - if password is None and password_hash is None: - module.fail_json(msg="password or password_hash parameter required when adding a user") - changed = user_add(cursor, user, host, password, password_hash, priv) + if password is None: + module.fail_json(msg="password parameter required when adding a user") + try: + changed = user_add(cursor, user, host, password, 
encrypted, priv) + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) From 8e812164a40837f16fcf89ae72cce371c4fbf5eb Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 11 Dec 2015 20:29:45 -0500 Subject: [PATCH 72/75] Add Jmainguy as author, fix hash check --- database/mysql/mysql_user.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 766eadb10f0..059d9fa6f57 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -35,7 +35,7 @@ options: - set the user's password. (Required when adding a user) required: false default: null - password_hash: + encrypted: description: - Indicate that the 'password' field is a `mysql_native_password` hash required: false @@ -128,7 +128,7 @@ notes: - Currently, there is only support for the `mysql_native_password` encryted password hash module. requirements: [ "MySQLdb" ] -author: "Ansible Core Team" +author: "Jonathan Mainguy (@Jmainguy)" ''' EXAMPLES = """ @@ -245,7 +245,7 @@ def user_add(cursor, user, host, password, encrypted, new_priv): for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True - + def is_hash(password): ishash = False if len(password) == 41 and password[0] == '*': @@ -253,10 +253,10 @@ def is_hash(password): ishash = True return ishash -def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs): +def user_mod(cursor, user, host, password, encrypted, new_priv, append_privs): changed = False grant_option = False - + # Handle clear text and hashed passwords. 
if bool(password): # Determine what user management method server uses @@ -269,8 +269,9 @@ def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs current_pass_hash = cursor.fetchone() if encrypted: + encrypted_string = (password) if is_hash(password): - if current_pass_hash[0] != encrypted: + if current_pass_hash[0] != encrypted_string: if old_user_mgmt: cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password)) else: @@ -291,6 +292,7 @@ def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) changed = True + # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) @@ -495,7 +497,14 @@ def main(): if state == "present": if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) + try: + if update_password == 'always': + changed = user_mod(cursor, user, host, password, encrypted, priv, append_privs) + else: + changed = user_mod(cursor, user, host, None, encrypted, priv, append_privs) + + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") From ed43b66d7756f9f48cc6e427eb3208af916f82af Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:04:01 -0500 Subject: [PATCH 73/75] made note that Z/z are only 2.1 options --- cloud/docker/docker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index c6cf10f0783..19e83aef43f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -81,8 +81,9 @@ options: description: - List of volumes to mount within the container - 'Use docker CLI-style syntax: C(/host:/container[:mode])' - - You can specify a read mode for the mount with either C(ro) or C(rw). SELinux hosts can additionally - use C(z) or C(Z) mount options to use a shared or private label for the volume. + - You can specify a read mode for the mount with either C(ro) or C(rw). + Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z) + mount options to use a shared or private label for the volume. default: null volumes_from: description: From 10e70aaf2bdc20149e4e3d4d1ef744e597c9daec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:16:47 -0500 Subject: [PATCH 74/75] note that create globs only work on 2.0 fixes #2666 --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index 3fe16882c24..2bd8f7048ad 100644 --- a/commands/command.py +++ b/commands/command.py @@ -47,12 +47,12 @@ options: default: null creates: description: - - a filename or glob pattern, when it already exists, this step will B(not) be run. + - a filename or (since 2.0) glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename or glob pattern, when it does not exist, this step will B(not) be run. + - a filename or (since 2.0) glob pattern, when it does not exist, this step will B(not) be run. 
version_added: "0.8" required: no default: null From f3ed8192412e8c9b9528a04bfef3cad1bad9f62f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:29:27 -0500 Subject: [PATCH 75/75] added missing version_added --- system/user.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/user.py b/system/user.py index 41934389a38..07ad015a561 100755 --- a/system/user.py +++ b/system/user.py @@ -53,6 +53,7 @@ options: required: false description: - Optionally sets the seuser type (user_u) on selinux enabled systems. + version_added: "2.1" group: required: false description:
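
A footnote on the command module change in PATCH 74 above: the documented
creates/removes behaviour amounts to a glob test along the lines of the
sketch below. This is only an illustration of the documented semantics, not
the module's actual code, and skip_command is a made-up helper name.

    import glob

    def skip_command(creates=None, removes=None):
        # creates: skip the step once something matching the pattern already exists.
        if creates and glob.glob(creates):
            return True
        # removes: skip the step when nothing matching the pattern exists.
        if removes and not glob.glob(removes):
            return True
        return False

    # e.g. skip_command(creates='/etc/myapp/*.conf') returns True as soon as any
    # file matches the pattern, so the command is not run again.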
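
Similarly, as background for the mysql_user changes in PATCHes 70-72: the
mysql_native_password value that encrypted=yes expects is SHA1 applied twice,
which is what the CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s))))) expression in
user_mod computes on the server side. A standalone sketch (the helper names
here are only for the example, not taken from the module):

    import hashlib
    import string

    def native_password_hash(password):
        # '*' followed by the uppercase hex SHA1 of the raw SHA1 digest of the password.
        inner = hashlib.sha1(password.encode('utf-8')).digest()
        return '*' + hashlib.sha1(inner).hexdigest().upper()

    def looks_like_native_hash(value):
        # Same shape test as the module's is_hash(): 41 chars, leading '*', hex body.
        return len(value) == 41 and value[0] == '*' and frozenset(value[1:]).issubset(string.hexdigits)

    hashed = native_password_hash('12345')
    assert looks_like_native_hash(hashed)
    # `hashed` can then be supplied as password= together with encrypted=yes.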