diff --git a/.travis.yml b/.travis.yml
index d1a48cb788e..91d1b9585d7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,7 +10,8 @@ addons:
       - python2.4
       - python2.6
 script:
-  - python2.4 -m compileall -fq -x 'cloud/|/accelerate.py' .
+  - python2.4 -m compileall -fq -x 'cloud/' .
   - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
   - python2.6 -m compileall -fq .
   - python2.7 -m compileall -fq .
+  #- ./test-docs.sh core
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e441a4e3527..ea9c4ced04e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -22,6 +22,10 @@ I'd also read the community page above, but in particular, make sure you copy [t
 
 Also please make sure you are testing on the latest released version of Ansible or the development branch.
 
+If you'd like to contribute code to an existing module
+======================================================
+Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team.
+
 Thanks!
diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py
index 6296020b777..ec9da6d4988 100644
--- a/cloud/amazon/_ec2_ami_search.py
+++ b/cloud/amazon/_ec2_ami_search.py
@@ -102,11 +102,12 @@ AWS_REGIONS = ['ap-northeast-1',
 
 def get_url(module, url):
     """ Get url and return response """
-    try:
-        r = urllib2.urlopen(url)
-    except (urllib2.HTTPError, urllib2.URLError), e:
-        code = getattr(e, 'code', -1)
-        module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
+
+    r, info = fetch_url(module, url)
+    if info['status'] != 200:
+        # Backwards compat
+        info['status_code'] = info['status']
+        module.fail_json(**info)
     return r
 
@@ -182,7 +183,7 @@ def main():
                       choices=['i386', 'amd64']),
         region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
         virt=dict(required=False, default='paravirtual',
-                  choices=['paravirtual', 'hvm'])
+                  choices=['paravirtual', 'hvm']),
     )
     module = AnsibleModule(argument_spec=arg_spec)
     distro = module.params['distro']
@@ -195,7 +196,8 @@ def main():
 
 # this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
 
 if __name__ == '__main__':
     main()
diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py
index 39b6f81a445..f95fbba00e2 100644
--- a/cloud/amazon/cloudformation.py
+++ b/cloud/amazon/cloudformation.py
@@ -51,16 +51,23 @@ options:
   template:
     description:
       - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present"
+        Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json"
     required: false
     default: null
    aliases: []
+  notification_arns:
+    description:
+      - The Simple Notification Service (SNS) topic ARNs to publish stack related events.
+    required: false
+    default: null
+    version_added: "2.0"
   stack_policy:
     description:
       - the path of the cloudformation stack policy
     required: false
     default: null
     aliases: []
-    version_added: "x.x"
+    version_added: "1.9"
   tags:
     description:
      - Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later.
@@ -81,8 +88,14 @@ options:
      - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present"
    required: false
    version_added: "2.0"
+  template_format:
+    description: For local templates, allows specification of json or yaml format
+    default: json
+    choices: [ json, yaml ]
+    required: false
+    version_added: "2.0"
 
-author: James S. Martin
+author: "James S. Martin (@jsmartin)"
 extends_documentation_fragment: aws
 '''
 
@@ -103,6 +116,22 @@ EXAMPLES = '''
     tags:
       Stack: "ansible-cloudformation"
 
+# Basic role example
+- name: launch ansible cloudformation example
+  cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: "present"
+    region: "us-east-1"
+    disable_rollback: true
+    template: "roles/cloudformation/files/cloudformation-example.json"
+    template_parameters:
+      KeyName: "jmartin"
+      DiskType: "ephemeral"
+      InstanceType: "m1.small"
+      ClusterSize: 3
+    tags:
+      Stack: "ansible-cloudformation"
+
 # Removal example
 - name: tear down old deployment
   cloudformation:
@@ -127,6 +156,7 @@ EXAMPLES = '''
 
 import json
 import time
+import yaml
 
 try:
     import boto
@@ -191,6 +221,11 @@ def stack_operation(cfn, stack_name, operation):
                           events = map(str, list(stack.describe_events())),
                           output = 'Stack %s failed' % operation)
             break
+        elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status:
+            result = dict(changed=True, failed=True,
+                          events = map(str, list(stack.describe_events())),
+                          output = 'Stack %s rollback failed' % operation)
+            break
         else:
             time.sleep(5)
     return result
@@ -216,9 +251,11 @@ def main():
             template_parameters=dict(required=False, type='dict', default={}),
             state=dict(default='present', choices=['present', 'absent']),
             template=dict(default=None, required=False),
+            notification_arns=dict(default=None, required=False),
             stack_policy=dict(default=None, required=False),
             disable_rollback=dict(default=False, type='bool'),
             template_url=dict(default=None, required=False),
+            template_format=dict(default='json', choices=['json', 'yaml'], required=False),
             tags=dict(default=None)
         )
     )
@@ -245,6 +282,14 @@ def main():
     else:
         template_body = None
 
+    if module.params['template_format'] == 'yaml':
+        if template_body is None:
+            module.fail_json(msg='yaml format only supported for local templates')
+        else:
+            template_body = json.dumps(yaml.load(template_body), indent=2)
+
+    notification_arns = module.params['notification_arns']
+
     if module.params['stack_policy'] is not None:
         stack_policy_body = open(module.params['stack_policy'], 'r').read()
     else:
@@ -285,6 +330,7 @@ def main():
         try:
             cfn.create_stack(stack_name, parameters=template_parameters_tup,
                              template_body=template_body,
+                             notification_arns=notification_arns,
                              stack_policy_body=stack_policy_body,
                              template_url=template_url,
                              disable_rollback=disable_rollback,
@@ -307,6 +353,7 @@ def main():
         try:
             cfn.update_stack(stack_name, parameters=template_parameters_tup,
                              template_body=template_body,
+                             notification_arns=notification_arns,
                              stack_policy_body=stack_policy_body,
                              disable_rollback=disable_rollback,
                              template_url=template_url,
diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py
old mode 100755
new mode 100644
index 95dab865026..a6b378c7e9c
--- a/cloud/amazon/ec2.py
+++ b/cloud/amazon/ec2.py
@@ -44,7 +44,7 @@ options:
   region:
     version_added: "1.2"
     description:
-      - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+      - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
     required: false
     default: null
     aliases: [ 'aws_region', 'ec2_region' ]
@@ -57,16 +57,17 @@ options:
     aliases: [ 'aws_zone', 'ec2_zone' ]
   instance_type:
     description:
-      - instance type to use for the instance
+      - instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
     required: true
     default: null
     aliases: []
   tenancy:
     version_added: "1.9"
    description:
-      - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
+      - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
     required: false
     default: default
+    choices: [ "default", "dedicated" ]
     aliases: []
   spot_price:
     version_added: "1.5"
@@ -75,6 +76,14 @@ options:
     required: false
     default: null
     aliases: []
+  spot_type:
+    version_added: "2.0"
+    description:
+      - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
+    required: false
+    default: "one-time"
+    choices: [ "one-time", "persistent" ]
+    aliases: []
   image:
     description:
       - I(ami) ID to use for the instance
@@ -123,6 +132,7 @@ options:
       - enable detailed monitoring (CloudWatch) for instance
     required: false
     default: null
+    choices: [ "yes", "no" ]
     aliases: []
   user_data:
     version_added: "0.9"
@@ -186,7 +196,15 @@ options:
     description:
       - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
     required: false
-    default: true
+    default: yes
+    choices: [ "yes", "no" ]
+  termination_protection:
+    version_added: "2.0"
+    description:
+      - Enable or disable termination protection
+    required: false
+    default: no
+    choices: [ "yes", "no" ]
   state:
     version_added: "1.3"
     description:
@@ -198,7 +216,7 @@ options:
   volumes:
     version_added: "1.5"
     description:
-      - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+      - "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict."
     required: false
     default: null
     aliases: []
@@ -223,7 +241,10 @@ options:
     default: null
     aliases: []
 
-author: Seth Vidal, Tim Gerla, Lester Wade
+author:
+    - "Tim Gerla (@tgerla)"
+    - "Lester Wade (@lwade)"
+    - "Seth Vidal"
 extends_documentation_fragment: aws
 '''
 
@@ -606,6 +627,19 @@ def get_instance_info(inst):
     except AttributeError:
         instance_info['ebs_optimized'] = False
 
+    try:
+        bdm_dict = {}
+        bdm = getattr(inst, 'block_device_mapping')
+        for device_name in bdm.keys():
+            bdm_dict[device_name] = {
+                'status': bdm[device_name].status,
+                'volume_id': bdm[device_name].volume_id,
+                'delete_on_termination': bdm[device_name].delete_on_termination
+            }
+        instance_info['block_device_mapping'] = bdm_dict
+    except AttributeError:
+        instance_info['block_device_mapping'] = False
+
     try:
         instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
     except AttributeError:
@@ -658,6 +692,8 @@ def create_block_device(module, ec2, volume):
         size = volume.get('volume_size', snapshot.volume_size)
         if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
             module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+        if 'encrypted' in volume:
+            module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
     if 'ephemeral' in volume:
         if 'snapshot' in volume:
             module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
@@ -666,8 +702,8 @@ def create_block_device(module, ec2, volume):
                            size=volume.get('volume_size'),
                            volume_type=volume.get('device_type'),
                            delete_on_termination=volume.get('delete_on_termination', False),
-                           iops=volume.get('iops'))
-
+                           iops=volume.get('iops'),
+                           encrypted=volume.get('encrypted', None))
 
 def boto_supports_param_in_spot_request(ec2, param):
    """
    Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
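Editor's note: the hunk above ends at the signature and docstring of boto_supports_param_in_spot_request(); its body falls outside this patch. A minimal sketch of how such a feature probe can be written (assuming Python 2, which these modules target; this body is illustrative, not necessarily the module's actual implementation):

    import inspect

    def boto_supports_param_in_spot_request(ec2, param):
        # Inspect request_spot_instances() on the live connection object and
        # report whether the installed boto version accepts the given keyword.
        method = getattr(ec2, 'request_spot_instances')
        return param in inspect.getargspec(method).args

Typical use is to guard an optional keyword such as placement_group, only adding it to the request when the probe returns True.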
@@ -756,6 +792,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     instance_type = module.params.get('instance_type')
     tenancy = module.params.get('tenancy')
     spot_price = module.params.get('spot_price')
+    spot_type = module.params.get('spot_type')
     image = module.params.get('image')
     if override_count:
         count = override_count
@@ -779,6 +816,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     exact_count = module.params.get('exact_count')
     count_tag = module.params.get('count_tag')
     source_dest_check = module.boolean(module.params.get('source_dest_check'))
+    termination_protection = module.boolean(module.params.get('termination_protection'))
 
     # group_id and group_name are exclusive of each other
     if group_id and group_name:
@@ -948,6 +986,7 @@ def create_instances(module, ec2, vpc, override_count=None):
 
             params.update(dict(
                 count = count_remaining,
+                type = spot_type,
             ))
             res = ec2.request_spot_instances(spot_price, **params)
 
@@ -1007,11 +1046,16 @@ def create_instances(module, ec2, vpc, override_count=None):
         for res in res_list:
             running_instances.extend(res.instances)
 
-        # Enabled by default by Amazon
-        if not source_dest_check:
+        # Enabled by default by AWS
+        if source_dest_check is False:
             for inst in res.instances:
                 inst.modify_attribute('sourceDestCheck', False)
 
+        # Disabled by default by AWS
+        if termination_protection is True:
+            for inst in res.instances:
+                inst.modify_attribute('disableApiTermination', True)
+
         # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
         if instance_tags:
             try:
@@ -1022,6 +1066,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     instance_dict_array = []
     created_instance_ids = []
     for inst in running_instances:
+        inst.update()
         d = get_instance_info(inst)
         created_instance_ids.append(inst.id)
         instance_dict_array.append(d)
@@ -1127,21 +1172,32 @@ def startstop_instances(module, ec2, instance_ids, state):
     if not isinstance(instance_ids, list) or len(instance_ids) < 1:
         module.fail_json(msg='instance_ids should be a list of instances, aborting')
 
-    # Check that our instances are not in the state we want to take them to
-    # and change them to our desired state
+    # Check (and if needed, change) instance attributes and instance state
     running_instances_array = []
     for res in ec2.get_all_instances(instance_ids):
         for inst in res.instances:
-            if inst.state != state:
-                instance_dict_array.append(get_instance_info(inst))
-                try:
-                    if state == 'running':
-                        inst.start()
-                    else:
-                        inst.stop()
-                except EC2ResponseError, e:
-                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
-                changed = True
+
+            # Check "source_dest_check" attribute
+            if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+                inst.modify_attribute('sourceDestCheck', source_dest_check)
+                changed = True
+
+            # Check "termination_protection" attribute
+            if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection:
+                inst.modify_attribute('disableApiTermination', termination_protection)
+                changed = True
+
+            # Check instance state
+            if inst.state != state:
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    if state == 'running':
+                        inst.start()
+                    else:
+                        inst.stop()
+                except EC2ResponseError, e:
+                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
 
     ## Wait for all the instances to finish starting or stopping
     wait_timeout = time.time() + wait_timeout
@@ -1175,6 +1231,7 @@ def main():
             zone = dict(aliases=['aws_zone', 'ec2_zone']),
             instance_type = dict(aliases=['type']),
             spot_price = dict(),
+            spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
             image = dict(),
             kernel = dict(),
             count = dict(type='int', default='1'),
@@ -1192,7 +1249,8 @@ def main():
             instance_profile_name = dict(),
             instance_ids = dict(type='list', aliases=['instance_id']),
             source_dest_check = dict(type='bool', default=True),
-            state = dict(default='present'),
+            termination_protection = dict(type='bool', default=False),
+            state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']),
             exact_count = dict(type='int', default=None),
             count_tag = dict(),
             volumes = dict(type='list'),
diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py
index 420c6e2f15c..0d504ee3b0c 100644
--- a/cloud/amazon/ec2_ami.py
+++ b/cloud/amazon/ec2_ami.py
@@ -69,6 +69,12 @@ options:
       - Image ID to be deregistered.
     required: false
     default: null
+  device_mapping:
+    version_added: "2.0"
+    description:
+      - An optional list of devices with custom configurations (same block-device-mapping parameters)
+    required: false
+    default: null
   delete_snapshot:
     description:
       - Whether or not to delete an AMI while deregistering it.
@@ -81,7 +87,7 @@ options:
     default: null
     version_added: "2.0"
 
-author: Evan Duffield
+author: "Evan Duffield (@scicoin-project) "
 extends_documentation_fragment: aws
 '''
 
@@ -110,6 +116,23 @@ EXAMPLES = '''
     name: newtest
   register: instance
 
+# AMI Creation, with a custom root-device size and another EBS attached
+- ec2_ami:
+    aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
+    aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+    instance_id: i-xxxxxx
+    name: newtest
+    device_mapping:
+        - device_name: /dev/sda1
+          size: XXX
+          delete_on_termination: true
+          volume_type: gp2
+        - device_name: /dev/sdb
+          size: YYY
+          delete_on_termination: false
+          volume_type: gp2
+  register: instance
+
 # Deregister/Delete AMI
 - ec2_ami:
     aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
@@ -136,6 +159,7 @@ import time
 try:
     import boto
     import boto.ec2
+    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False
@@ -155,6 +179,7 @@ def create_image(module, ec2):
     wait_timeout = int(module.params.get('wait_timeout'))
     description = module.params.get('description')
     no_reboot = module.params.get('no_reboot')
+    device_mapping = module.params.get('device_mapping')
     tags = module.params.get('tags')
 
     try:
         params = {'instance_id': instance_id,
                   'name': name,
                   'description': description,
                   'no_reboot': no_reboot}
 
+        if device_mapping:
+            bdm = BlockDeviceMapping()
+            for device in device_mapping:
+                if 'device_name' not in device:
+                    module.fail_json(msg = 'Device name must be set for volume')
+                device_name = device['device_name']
+                del device['device_name']
+                bd = BlockDeviceType(**device)
+                bdm[device_name] = bd
+            params['block_device_mapping'] = bdm
+
         image_id = ec2.create_image(**params)
     except boto.exception.BotoServerError, e:
-        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+        if e.error_code == 'InvalidAMIName.Duplicate':
+            images = ec2.get_all_images()
+            for img in images:
+                if img.name == name:
+                    module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False)
+                    sys.exit(0)
+            else:
+                module.fail_json(msg="Error in retrieving duplicate AMI details")
+        else:
+            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
 
     # Wait until the image is recognized. EC2 API has eventual consistency,
     # such that a successful CreateImage API call doesn't guarantee the success
@@ -248,8 +293,8 @@ def main():
             description = dict(default=""),
             no_reboot = dict(default=False, type="bool"),
             state = dict(default='present'),
-            tags = dict(type='dict'),
-
+            device_mapping = dict(type='list'),
+            tags = dict(type='dict')
         )
     )
     module = AnsibleModule(argument_spec=argument_spec)
@@ -282,4 +327,3 @@
 from ansible.module_utils.basic import *
 from ansible.module_utils.ec2 import *
 main()
-
diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py
index 1c790849cff..c8aa5d792df 100644
--- a/cloud/amazon/ec2_ami_find.py
+++ b/cloud/amazon/ec2_ami_find.py
@@ -25,7 +25,7 @@ description:
   - Can search AMIs with different owners
  - Can search by matching tag(s), by AMI name and/or other criteria
  - Results can be sorted and sliced
-author: Tom Bamford
+author: "Tom Bamford (@tombamford)"
notes:
  - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
  - See the example below for a suggestion of how to search by distro/release.
@@ -141,7 +141,7 @@ EXAMPLES = '''
 # Search for the AMI tagged "project:website"
 - ec2_ami_find:
     owner: self
-    tags:
+    ami_tags:
       project: website
     no_result_action: fail
   register: ami_find
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py
index d527247f6c5..efcd66606b8 100644
--- a/cloud/amazon/ec2_asg.py
+++ b/cloud/amazon/ec2_asg.py
@@ -21,7 +21,7 @@ description:
   - Can create or delete AWS Autoscaling Groups
   - Works with the ec2_lc module to manage Launch Configurations
 version_added: "1.6"
-author: Gareth Rushgrove
+author: "Gareth Rushgrove (@garethr)"
 options:
   state:
     description:
@@ -43,7 +43,7 @@ options:
   launch_config_name:
     description:
       - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
-    required: false
+    required: true
   min_size:
     description:
       - Minimum number of instances in group
@@ -109,6 +109,12 @@ options:
     default: EC2
     version_added: "1.7"
     choices: ['EC2', 'ELB']
+  default_cooldown:
+    description:
+      - The number of seconds after a scaling activity completes before another can begin.
+    required: false
+    default: 300 seconds
+    version_added: "2.0"
   wait_timeout:
     description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with instance_ids option.
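Editor's note: the new default_cooldown option documented above is handed straight to boto's AutoScalingGroup constructor (see the create_autoscaling_group() hunk further down in this patch). A minimal standalone sketch of the underlying boto call, with placeholder region, group, and launch-configuration names:

    import boto.ec2.autoscale
    from boto.ec2.autoscale import AutoScalingGroup

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    lc = conn.get_all_launch_configurations(names=['example-lc'])[0]
    ag = AutoScalingGroup(group_name='example-asg',
                          launch_config=lc,
                          availability_zones=['us-east-1a'],
                          min_size=1, max_size=3, desired_capacity=2,
                          # seconds to wait after a scaling activity before
                          # another may begin (the module's new option)
                          default_cooldown=300,
                          connection=conn)
    conn.create_auto_scaling_group(ag)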
@@ -190,9 +196,13 @@ to "replace_instances": ''' import time +import logging as log from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * +log.getLogger('boto').setLevel(log.CRITICAL) +#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + try: import boto.ec2.autoscale @@ -265,8 +275,71 @@ def get_properties(autoscaling_group): if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) + return properties +def elb_dreg(asg_connection, module, group_name, instance_id): + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + as_group = asg_connection.get_all_groups(names=[group_name])[0] + wait_timeout = module.params.get('wait_timeout') + props = get_properties(as_group) + count = 1 + if as_group.load_balancers and as_group.health_check_type == 'ELB': + try: + elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + else: + return + + exists = True + for lb in as_group.load_balancers: + elb_connection.deregister_instances(lb, instance_id) + log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) + + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and count > 0: + count = 0 + for lb in as_group.load_balancers: + lb_instances = elb_connection.describe_instance_health(lb) + for i in lb_instances: + if i.instance_id == instance_id and i.state == "InService": + count += 1 + log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description)) + time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg = "Waited too long for instance to deregister. 
{0}".format(time.asctime())) + + + + +def elb_healthy(asg_connection, elb_connection, module, group_name): + healthy_instances = [] + as_group = asg_connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + # get healthy, inservice instances from ASG + instances = [] + for instance, settings in props['instance_facts'].items(): + if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + instances.append(instance) + log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances)) + log.debug("ELB instance status:") + for lb in as_group.load_balancers: + # we catch a race condition that sometimes happens if the instance exists in the ASG + # but has not yet show up in the ELB + try: + lb_instances = elb_connection.describe_instance_health(lb, instances=instances) + except boto.exception.InvalidInstance, e: + pass + for i in lb_instances: + if i.state == "InService": + healthy_instances.append(i.instance_id) + log.debug("{0}: {1}".format(i.instance_id, i.state)) + return len(healthy_instances) + + def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) @@ -277,36 +350,23 @@ def wait_for_elb(asg_connection, module, group_name): as_group = asg_connection.get_all_groups(names=[group_name])[0] if as_group.load_balancers and as_group.health_check_type == 'ELB': + log.debug("Waiting for ELB to consider intances healthy.") try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) wait_timeout = time.time() + wait_timeout - healthy_instances = {} + healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) - while len(healthy_instances.keys()) < as_group.min_size and wait_timeout > time.time(): - as_group = asg_connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - # get healthy, inservice instances from ASG - instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': - instances.append(instance) - for lb in as_group.load_balancers: - # we catch a race condition that sometimes happens if the instance exists in the ASG - # but has not yet show up in the ELB - try: - lb_instances = elb_connection.describe_instance_health(lb, instances=instances) - except boto.exception.InvalidInstance, e: - pass - for i in lb_instances: - if i.state == "InService": - healthy_instances[i.instance_id] = i.state + while healthy_instances < as_group.min_size and wait_timeout > time.time(): + healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) + log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) + log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) def create_autoscaling_group(connection, module): group_name = module.params.get('name') @@ -320,6 +380,7 @@ def create_autoscaling_group(connection, module): set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') + default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') @@ -359,12 +420,13 @@ def create_autoscaling_group(connection, module): connection=connection, tags=asg_tags, health_check_period=health_check_period, - health_check_type=health_check_type) + health_check_type=health_check_type, + default_cooldown=default_cooldown) try: connection.create_auto_scaling_group(ag) if wait_for_instances == True: - wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) @@ -430,7 +492,7 @@ def create_autoscaling_group(connection, module): module.fail_json(msg=str(e)) if wait_for_instances == True: - wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: as_group = connection.get_all_groups(names=[group_name])[0] @@ -471,6 +533,15 @@ def get_chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i+n] +def update_size(group, max_size, min_size, dc): + + log.debug("setting ASG sizes") + log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size )) + group.max_size = max_size + group.min_size = min_size + group.desired_capacity = dc + group.update() + def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') @@ -478,91 +549,191 @@ def replace(connection, module): max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') - - # FIXME: we need some more docs about this feature + lc_check = module.params.get('lc_check') replace_instances = module.params.get('replace_instances') as_group = connection.get_all_groups(names=[group_name])[0] - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') props = get_properties(as_group) instances = props['instances'] - replaceable = 0 if replace_instances: instances = replace_instances - for k in props['instance_facts'].keys(): - if k in instances: - if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']: - replaceable += 1 - if replaceable == 0: + # check to see if instances are replaceable if checking launch configs + + new_instances, old_instances = get_instances_by_lc(props, lc_check, instances) + num_new_inst_needed = desired_capacity - len(new_instances) + + if lc_check: + if num_new_inst_needed == 0 and old_instances: + log.debug("No new instances 
needed, but old instances are present. Removing old instances") + terminate_batch(connection, module, old_instances, instances, True) + as_group = connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + changed = True + return(changed, props) + + # we don't want to spin up extra instances if not necessary + if num_new_inst_needed < batch_size: + log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) + batch_size = num_new_inst_needed + + if not old_instances: changed = False return(changed, props) # set temporary settings and wait for them to be reached + # This should get overriden if the number of instances left is less than the batch size. + as_group = connection.get_all_groups(names=[group_name])[0] - as_group.max_size = max_size + batch_size - as_group.min_size = min_size + batch_size - as_group.desired_capacity = desired_capacity + batch_size - as_group.update() - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) + wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instances = props['instances'] if replace_instances: instances = replace_instances + log.debug("beginning main loop") for i in get_chunks(instances, batch_size): - terminate_batch(connection, module, i) - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + # break out of this loop if we have enough new instances + break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False) + wait_for_term_inst(connection, module, term_instances) + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] - # return settings to normal - as_group.max_size = max_size - as_group.min_size = min_size - as_group.desired_capacity = desired_capacity - as_group.update() + if break_early: + log.debug("breaking loop") + break + update_size(as_group, max_size, min_size, desired_capacity) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) + log.debug("Rolling update complete.") changed=True return(changed, asg_properties) -def terminate_batch(connection, module, replace_instances): - group_name = module.params.get('name') - wait_timeout = int(module.params.get('wait_timeout')) - lc_check = module.params.get('lc_check') +def get_instances_by_lc(props, lc_check, initial_instances): - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) + new_instances = [] + old_instances = [] + # old instances are those that have the old launch config + if lc_check: + for i in props['instances']: + if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']: + new_instances.append(i) + else: + old_instances.append(i) + + else: + log.debug("Comparing initial instances with current: {0}".format(initial_instances)) + for i in props['instances']: + if i not in initial_instances: + new_instances.append(i) + else: + old_instances.append(i) + log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances)) + log.debug("Old 
instances: {0}, {1}".format(len(old_instances), old_instances)) + + return new_instances, old_instances + + +def list_purgeable_instances(props, lc_check, replace_instances, initial_instances): + instances_to_terminate = [] + instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config - old_instances = [] - instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) - if lc_check: for i in instances: if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: - old_instances.append(i) + instances_to_terminate.append(i) else: - old_instances = instances + for i in instances: + if i in initial_instances: + instances_to_terminate.append(i) + return instances_to_terminate - # set all instances given to unhealthy - for instance_id in old_instances: - connection.set_instance_health(instance_id,'Unhealthy') +def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False): + batch_size = module.params.get('replace_batch_size') + min_size = module.params.get('min_size') + desired_capacity = module.params.get('desired_capacity') + group_name = module.params.get('name') + wait_timeout = int(module.params.get('wait_timeout')) + lc_check = module.params.get('lc_check') + decrement_capacity = False + break_loop = False + + as_group = connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + desired_size = as_group.min_size + + new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances) + num_new_inst_needed = desired_capacity - len(new_instances) + + # check to make sure instances given are actually in the given ASG + # and they have a non-current launch config + instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances) + + log.debug("new instances needed: {0}".format(num_new_inst_needed)) + log.debug("new instances: {0}".format(new_instances)) + log.debug("old instances: {0}".format(old_instances)) + log.debug("batch instances: {0}".format(",".join(instances_to_terminate))) + + if num_new_inst_needed == 0: + decrement_capacity = True + if as_group.min_size != min_size: + as_group.min_size = min_size + as_group.update() + log.debug("Updating minimum size back to original of {0}".format(min_size)) + #if are some leftover old instances, but we are already at capacity with new ones + # we don't want to decrement capacity + if leftovers: + decrement_capacity = False + break_loop = True + instances_to_terminate = old_instances + desired_size = min_size + log.debug("No new instances needed") + + if num_new_inst_needed < batch_size and num_new_inst_needed !=0 : + instances_to_terminate = instances_to_terminate[:num_new_inst_needed] + decrement_capacity = False + break_loop = False + log.debug("{0} new instances needed".format(num_new_inst_needed)) + + log.debug("decrementing capacity: {0}".format(decrement_capacity)) + + for instance_id in instances_to_terminate: + elb_dreg(connection, module, group_name, instance_id) + log.debug("terminating instance: {0}".format(instance_id)) + connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are # no longer in the list + return break_loop, desired_size, instances_to_terminate + + +def wait_for_term_inst(connection, module, term_instances): + + batch_size = 
module.params.get('replace_batch_size') + wait_timeout = module.params.get('wait_timeout') + group_name = module.params.get('name') + lc_check = module.params.get('lc_check') + as_group = connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) count = 1 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: + log.debug("waiting for instances to terminate") count = 0 as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instance_facts = props['instance_facts'] - instances = ( i for i in instance_facts if i in old_instances) + instances = ( i for i in instance_facts if i in term_instances) for i in instances: - if ( instance_facts[i]['lifecycle_state'] == 'Terminating' - or instance_facts[i]['health_status'] == 'Unhealthy' ): + lifecycle = instance_facts[i]['lifecycle_state'] + health = instance_facts[i]['health_status'] + log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health )) + if lifecycle == 'Terminating' or healthy == 'Unhealthy': count += 1 time.sleep(10) @@ -570,21 +741,24 @@ def terminate_batch(connection, module, replace_instances): # waiting took too long module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime()) -def wait_for_new_instances(module, connection, group_name, wait_timeout, desired_size, prop): + +def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) + log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: + log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) time.sleep(10) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime()) - + log.debug("Reached {0}: {1}".format(prop, desired_size)) return props def main(): @@ -608,6 +782,7 @@ def main(): tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), + default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True) ), ) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 7258ea04759..c3b764b2e63 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -37,25 +37,21 @@ options: version_added: "1.4" reuse_existing_ip_allowed: description: - - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. + - Reuse an EIP that is not associated to an instance (when available),''' +''' instead of allocating a new one. required: false default: false version_added: "1.6" - wait_timeout: - description: - - how long to wait in seconds for newly provisioned EIPs to become available - default: 300 - version_added: "1.7" extends_documentation_fragment: aws -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin) " notes: - This module will return C(public_ip) on success, which will contain the public IP address associated with the instance. 
- There may be a delay between the time the Elastic IP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and pause - to delay further playbook execution until the instance is reachable, if - necessary. + the cloud instance is reachable via the new address. Use wait_for and + pause to delay further playbook execution until the instance is reachable, + if necessary. ''' EXAMPLES = ''' @@ -78,7 +74,8 @@ EXAMPLES = ''' ec2_eip: state='present' - name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3 + ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes''' +''' group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" @@ -97,178 +94,165 @@ try: except ImportError: HAS_BOTO = False -wait_timeout = 0 -def associate_ip_and_instance(ec2, address, instance_id, module): - if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) +class EIPException(Exception): + pass + + +def associate_ip_and_instance(ec2, address, instance_id, check_mode): + if address_is_associated_with_instance(ec2, address, instance_id): + return {'changed': False} # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.associate_address(instance_id, allocation_id=address.allocation_id) + if not check_mode: + if address.domain == 'vpc': + res = ec2.associate_address(instance_id, + allocation_id=address.allocation_id) else: - res = ec2.associate_address(instance_id, public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - - if res: - module.exit_json(changed=True, public_ip=address.public_ip) - else: - module.fail_json(msg="association failed") + res = ec2.associate_address(instance_id, + public_ip=address.public_ip) + if not res: + raise EIPException('association failed') + + return {'changed': True} -def disassociate_ip_and_instance(ec2, address, instance_id, module): - if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) +def disassociate_ip_and_instance(ec2, address, instance_id, check_mode): + if not address_is_associated_with_instance(ec2, address, instance_id): + return {'changed': False} # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": - res = ec2.disassociate_address(association_id=address.association_id) + if not check_mode: + if address.domain == 'vpc': + res = ec2.disassociate_address( + association_id=address.association_id) else: res = ec2.disassociate_address(public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="disassociation failed") + if not res: + raise EIPException('disassociation failed') + + return {'changed': True} -def find_address(ec2, public_ip, module, fail_on_not_found=True): - """ Find an existing Elastic IP address """ - if wait_timeout != 0: - timeout = time.time() + wait_timeout - while timeout > time.time(): - try: - addresses = ec2.get_all_addresses([public_ip]) - break - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." 
% public_ip in e.message : - if not fail_on_not_found: - return None - else: - module.fail_json(msg=str(e.message)) - time.sleep(5) - - if timeout <= time.time(): - module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime()) - else: - try: - addresses = ec2.get_all_addresses([public_ip]) - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." % public_ip in e.message : - if not fail_on_not_found: - return None - module.fail_json(msg=str(e.message)) - - return addresses[0] - - -def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): - """ Check if the elastic IP is currently associated with the instance """ - address = find_address(ec2, public_ip, module) - if address: - return address.instance_id == instance_id - else: - return False - -def instance_is_associated(ec2, instance, module): - """ - Check if the given instance object is already associated with an - elastic IP - """ - instance_ip = instance.ip_address - if not instance_ip: - return False - eip = find_address(ec2, instance_ip, module, fail_on_not_found=False) - return (eip and (eip.public_ip == instance_ip)) - -def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): - """ Allocate a new elastic IP address (when needed) and return it """ - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(change=True) - - if reuse_existing_ip_allowed: - if domain: - domain_filter = { 'domain' : domain } - else: - domain_filter = { 'domain' : 'standard' } - all_addresses = ec2.get_all_addresses(filters=domain_filter) - - unassociated_addresses = filter(lambda a: not a.instance_id, all_addresses) - if unassociated_addresses: - address = unassociated_addresses[0]; - else: - address = ec2.allocate_address(domain=domain) - else: - address = ec2.allocate_address(domain=domain) - - return address - - -def release_address(ec2, public_ip, module): - """ Release a previously allocated elastic IP address """ - - address = find_address(ec2, public_ip, module) - - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(change=True) - - res = address.release() - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="release failed") - - -def find_instance(ec2, instance_id, module): - """ Attempt to find the EC2 instance and return it """ - +def _find_address_by_ip(ec2, public_ip): try: - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - + return ec2.get_all_addresses([public_ip])[0] + except boto.exception.EC2ResponseError as e: + if "Address '{}' not found.".format(public_ip) not in e.message: + raise + + +def _find_address_by_instance_id(ec2, instance_id): + addresses = ec2.get_all_addresses(None, {'instance-id': instance_id}) + if addresses: + return addresses[0] + + +def find_address(ec2, public_ip, instance_id): + """ Find an existing Elastic IP address """ + if public_ip: + return _find_address_by_ip(ec2, public_ip) + elif instance_id: + return _find_address_by_instance_id(ec2, instance_id) + + +def address_is_associated_with_instance(ec2, address, instance_id): + """ Check if the elastic IP is currently associated with the instance """ + if address: + return address and address.instance_id == instance_id + return False + + +def allocate_address(ec2, domain, reuse_existing_ip_allowed): + """ Allocate a new elastic IP address (when needed) and return it """ + if reuse_existing_ip_allowed: + domain_filter = 
{'domain': domain or 'standard'} + all_addresses = ec2.get_all_addresses(filters=domain_filter) + + unassociated_addresses = [a for a in all_addresses + if not a.instance_id] + if unassociated_addresses: + return unassociated_addresses[0] + + return ec2.allocate_address(domain=domain) + + +def release_address(ec2, address, check_mode): + """ Release a previously allocated elastic IP address """ + + # If we're in check mode, nothing else to do + if not check_mode: + if not address.release(): + EIPException('release failed') + + return {'changed': True} + + +def find_instance(ec2, instance_id): + """ Attempt to find the EC2 instance and return it """ + + reservations = ec2.get_all_reservations(instance_ids=[instance_id]) + if len(reservations) == 1: instances = reservations[0].instances if len(instances) == 1: return instances[0] - - module.fail_json(msg="could not find instance" + instance_id) - -def allocate_eip(ec2, eip_domain, module, reuse_existing_ip_allowed, new_eip_timeout): - # Allocate a new elastic IP - address = allocate_address(ec2, eip_domain, module, reuse_existing_ip_allowed) - # overriding the timeout since this is a a newly provisioned ip - global wait_timeout - wait_timeout = new_eip_timeout - return address + raise EIPException("could not find instance" + instance_id) + + +def ensure_present(ec2, domain, address, instance_id, + reuse_existing_ip_allowed, check_mode): + changed = False + + # Return the EIP object since we've been given a public IP + if not address: + if check_mode: + return {'changed': True} + + address = allocate_address(ec2, domain, reuse_existing_ip_allowed) + changed = True + + if instance_id: + # Allocate an IP for instance since no public_ip was provided + instance = find_instance(ec2, instance_id) + if instance.vpc_id: + domain = 'vpc' + + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_instance(ec2, address, instance_id, + check_mode) + changed = changed or assoc_result['changed'] + + return {'changed': changed, 'public_ip': address.public_ip} + + +def ensure_absent(ec2, domain, address, instance_id, check_mode): + if not address: + return {'changed': False} + + # disassociating address from instance + if instance_id: + return disassociate_ip_and_instance(ec2, address, instance_id, + check_mode) + # releasing address + else: + return release_address(ec2, address, check_mode) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - instance_id = dict(required=False), - public_ip = dict(required=False, aliases= ['ip']), - state = dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc = dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) + instance_id=dict(required=False), + public_ip=dict(required=False, aliases=['ip']), + state=dict(required=False, default='present', + choices=['present', 'absent']), + in_vpc=dict(required=False, type='bool', default=False), + reuse_existing_ip_allowed=dict(required=False, type='bool', + default=False), + wait_timeout=dict(default=300), + )) module = AnsibleModule( argument_spec=argument_spec, @@ -284,54 +268,27 @@ def main(): public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') - domain = "vpc" if in_vpc else None + domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - 
new_eip_timeout = int(module.params.get('wait_timeout')) - if state == 'present': - # If both instance_id and public_ip are not specified, allocate a new - # elastic IP, and exit. - if not instance_id and not public_ip: - address = allocate_eip(ec2, domain, module, - reuse_existing_ip_allowed, new_eip_timeout) - module.exit_json(changed=True, public_ip=address.public_ip) + try: + address = find_address(ec2, public_ip, instance_id) - # Return the EIP object since we've been given a public IP - if public_ip: - address = find_address(ec2, public_ip, module) - - if instance_id and not public_ip: - instance = find_instance(ec2, instance_id, module) - - if instance.vpc_id: - domain = "vpc" - - # Do nothing if the instance is already associated with an - # elastic IP. - if instance_is_associated(ec2, instance, module): - module.exit_json(changed=False, public_ip=instance.ip_address) - - # If the instance is not already associated with an elastic IP, - # allocate a new one. - address = allocate_eip( - ec2, domain, module, reuse_existing_ip_allowed, new_eip_timeout) - - # Associate address object (provided or allocated) with instance - associate_ip_and_instance(ec2, address, instance_id, module) - - else: - #disassociating address from instance - if instance_id: - address = find_address(ec2, public_ip, module) - disassociate_ip_and_instance(ec2, address, instance_id, module) - #releasing address + if state == 'present': + result = ensure_present(ec2, domain, address, instance_id, + reuse_existing_ip_allowed, + module.check_mode) else: - release_address(ec2, public_ip, module) + result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + except (boto.exception.EC2ResponseError, EIPException) as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) # import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 2a29a5e49a9..6530a00bcb9 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -25,7 +25,7 @@ description: if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. version_added: "1.2" -author: John Jarvis +author: "John Jarvis (@jarv)" options: state: description: @@ -103,6 +103,7 @@ import time try: import boto import boto.ec2 + import boto.ec2.autoscale import boto.ec2.elb from boto.regioninfo import RegionInfo HAS_BOTO = True @@ -129,9 +130,9 @@ class ElbManager: for lb in self.lbs: initial_state = self._get_instance_health(lb) if initial_state is None: - # The instance isn't registered with this ELB so just - # return unchanged - return + # Instance isn't registered with this load + # balancer. Ignore it and try the next one. 
+ continue lb.deregister_instances([self.instance_id]) @@ -254,6 +255,9 @@ class ElbManager: for elb lookup instead of returning what elbs are attached to self.instance_id""" + if not ec2_elbs: + ec2_elbs = self._get_auto_scaling_group_lbs() + try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) @@ -272,6 +276,32 @@ class ElbManager: lbs.append(lb) return lbs + def _get_auto_scaling_group_lbs(self): + """Returns a list of ELBs associated with self.instance_id + indirectly through its auto scaling group membership""" + + try: + asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + self.module.fail_json(msg=str(e)) + + asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) + if len(asg_instances) > 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") + + if not asg_instances: + asg_elbs = [] + else: + asg_name = asg_instances[0].group_name + + asgs = asg.get_all_groups([asg_name]) + if len(asg_instances) != 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group.") + + asg_elbs = asgs[0].load_balancers + + return asg_elbs + def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 4a03542de66..3d54f994436 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -22,7 +22,9 @@ description: - Will be marked changed when called only if state is changed. short_description: Creates or destroys Amazon ELB. version_added: "1.5" -author: Jim Dalton +author: + - "Jim Dalton (@jsdalton)" + - "Rick Mendes (@rickmendes)" options: state: description: @@ -56,6 +58,12 @@ options: require: false default: None version_added: "1.6" + security_group_names: + description: + - A list of security group names to apply to the elb + require: false + default: None + version_added: "2.0" health_check: description: - An associative array of health check configuration settings (see example) @@ -68,7 +76,7 @@ options: aliases: ['aws_region', 'ec2_region'] subnets: description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. required: false default: None aliases: [] @@ -77,7 +85,7 @@ options: description: - Purge existing subnet on ELB that are not found in subnets required: false - default: false + default: false version_added: "1.7" scheme: description: @@ -147,7 +155,7 @@ EXAMPLES = """ name: "test-vpc" scheme: internal state: present - subnets: + subnets: - subnet-abcd1234 - subnet-1a2b3c4d listeners: @@ -213,7 +221,7 @@ EXAMPLES = """ instance_port: 80 purge_zones: yes -# Creates a ELB and assigns a list of subnets to it. +# Creates a ELB and assigns a list of subnets to it. 
- local_action: module: ec2_elb_lb state: present @@ -297,10 +305,10 @@ class ElbManager(object): """Handles ELB creation and destruction""" def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, + zones=None, purge_zones=None, security_group_ids=None, health_check=None, subnets=None, purge_subnets=None, scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, + cross_az_load_balancing=None, stickiness=None, region=None, **aws_connect_params): self.module = module @@ -361,7 +369,8 @@ class ElbManager(object): if not check_elb: info = { 'name': self.name, - 'status': self.status + 'status': self.status, + 'region': self.region } else: try: @@ -384,9 +393,34 @@ class ElbManager(object): 'hosted_zone_name': check_elb.canonical_hosted_zone_name, 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy + 'app_cookie_policy': app_cookie_policy, + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0, + 'region': self.region } + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [ dict( + instance_id = instance_state.instance_id, + reason_code = instance_state.reason_code, + state = instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] + + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] += 1 + if check_elb.health_check: info['health_check'] = { 'target': check_elb.health_check.target, @@ -418,7 +452,7 @@ class ElbManager(object): else: info['cross_az_load_balancing'] = 'no' - # return stickiness info? + # return stickiness info? 
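# Reviewer note: get_info() above now reports per-instance health plus
# InService/OutOfService tallies. The counting logic can be exercised on its
# own against boto (a sketch; `elb_conn` is a boto.ec2.elb ELBConnection and
# the helper name is ours):

from collections import Counter

def health_summary(elb_conn, lb_name):
    states = elb_conn.describe_instance_health(lb_name)
    counts = Counter(s.state for s in states)
    return dict(
        in_service=counts.get('InService', 0),
        out_of_service=counts.get('OutOfService', 0),
        unknown=len(states) - counts.get('InService', 0) - counts.get('OutOfService', 0))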
return info @@ -598,7 +632,7 @@ class ElbManager(object): self._attach_subnets(subnets_to_attach) if subnets_to_detach: self._detach_subnets(subnets_to_detach) - + def _set_zones(self): """Determine which zones need to be enabled or disabled on the ELB""" if self.zones: @@ -703,7 +737,7 @@ class ElbManager(object): else: self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) self.changed = True - + self._set_listener_policy(listeners_dict, policy) def select_stickiness_policy(self): @@ -770,7 +804,7 @@ class ElbManager(object): else: self._set_listener_policy(listeners_dict) - + def _get_health_check_target(self): """Compose target string from healthcheck parameters""" protocol = self.health_check['ping_protocol'].upper() @@ -792,6 +826,7 @@ def main(): zones={'default': None, 'required': False, 'type': 'list'}, purge_zones={'default': False, 'required': False, 'type': 'bool'}, security_group_ids={'default': None, 'required': False, 'type': 'list'}, + security_group_names={'default': None, 'required': False, 'type': 'list'}, health_check={'default': None, 'required': False, 'type': 'dict'}, subnets={'default': None, 'required': False, 'type': 'list'}, purge_subnets={'default': False, 'required': False, 'type': 'bool'}, @@ -804,6 +839,7 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + mutually_exclusive = [['security_group_ids', 'security_group_names']] ) if not HAS_BOTO: @@ -820,6 +856,7 @@ def main(): zones = module.params['zones'] purge_zones = module.params['purge_zones'] security_group_ids = module.params['security_group_ids'] + security_group_names = module.params['security_group_names'] health_check = module.params['health_check'] subnets = module.params['subnets'] purge_subnets = module.params['purge_subnets'] @@ -834,6 +871,21 @@ def main(): if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") + if security_group_names: + security_group_ids = [] + try: + ec2 = ec2_connect(module) + grp_details = ec2.get_all_security_groups() + + for group_name in security_group_names: + if isinstance(group_name, basestring): + group_name = [group_name] + + group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] + security_group_ids.extend(group_id) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + elb_man = ElbManager(module, name, listeners, purge_listeners, zones, purge_zones, security_group_ids, health_check, subnets, purge_subnets, scheme, diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index cf2a90aabc5..6bd587bf018 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -36,7 +36,7 @@ description: The module must be called from within the EC2 instance itself. notes: - Parameters to filter on ec2_facts may be added later. -author: "Silviu Dicu " +author: "Silviu Dicu (@silviud) " ''' EXAMPLES = ''' diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 6552e5abf67..bde2f5cc19e 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -5,6 +5,7 @@ DOCUMENTATION = ''' --- module: ec2_group +author: "Andrew de Quincey (@adq)" version_added: "1.3" short_description: maintain an ec2 VPC security group. description: @@ -24,15 +25,11 @@ options: required: false rules: description: - - List of firewall inbound rules to enforce in this group (see''' -''' example). 
If none are supplied, a default all-out rule is assumed.''' -''' If an empty list is supplied, no inbound rules will be enabled. + - List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled. required: false rules_egress: description: - - List of firewall outbound rules to enforce in this group (see''' -''' example). If none are supplied, a default all-out rule is assumed.''' -''' If an empty list is supplied, no outbound rules will be enabled. + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. required: false version_added: "1.6" region: @@ -90,6 +87,14 @@ EXAMPLES = ''' from_port: 22 to_port: 22 cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg - proto: udp from_port: 10050 to_port: 10050 @@ -113,6 +118,7 @@ EXAMPLES = ''' try: import boto.ec2 + from boto.ec2.securitygroup import SecurityGroup HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -122,6 +128,11 @@ def make_rule_key(prefix, rule, group_id, cidr_ip): """Creates a unique key for an individual group rule""" if isinstance(rule, dict): proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] + #fix for 11177 + if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1: + from_port = 'none' + to_port = 'none' + else: # isinstance boto.ec2.securitygroup.IPPermissions proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')] @@ -135,6 +146,22 @@ def addRulesToLookup(rules, prefix, dict): dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant) +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', + 'group_id', 'group_name', 'group_desc', + 'proto', 'from_port', 'to_port') + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{}\''.format(k)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): """ Returns tuple of (group_id, ip) after validating rule params. @@ -148,6 +175,7 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): group_id or a non-None ip range. """ + FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)' group_id = None group_name = None ip = None @@ -158,6 +186,12 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): module.fail_json(msg="Specify group_name OR cidr_ip, not both") elif 'group_id' in rule and 'group_name' in rule: module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it + owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() + group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id) + groups[group_id] = group_instance + groups[group_name] = group_instance elif 'group_id' in rule: group_id = rule['group_id'] elif 'group_name' in rule: @@ -291,6 +325,8 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules is not None: for rule in rules: + validate_rule(module, rule) + group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) if target_group_created: changed = True @@ -319,6 +355,11 @@ def main(): for (rule, grant) in groupRules.itervalues() : grantGroup = None if grant.group_id: + if grant.owner_id != group.owner_id: + # this is a foreign Security Group. Since you can't fetch it you must create an instance of it + group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id) + groups[grant.group_id] = group_instance + groups[grant.name] = group_instance grantGroup = groups[grant.group_id] if not module.check_mode: group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) @@ -331,6 +372,8 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules_egress is not None: for rule in rules_egress: + validate_rule(module, rule) + group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) if target_group_created: changed = True diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index 6bc9d936ee3..b59c50034d6 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -46,7 +46,7 @@ options: version_added: "1.6" extends_documentation_fragment: aws -author: Vincent Viallet +author: "Vincent Viallet (@zbal)" ''' EXAMPLES = ''' @@ -127,25 +127,23 @@ def main(): if state == 'absent': if key: '''found a match, delete it''' - try: - key.delete() - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if not ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: - module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) - else: - key = None - changed = True - else: - '''no match found, no changes required''' + if not module.check_mode: + try: + key.delete() + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if not ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out while waiting for the key to be removed") + except Exception, e: + module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) + key = None + changed = True # Ensure requested key is present elif state == 'present': diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 18a736600d0..0721b4e203d 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -26,7 +26,7 @@ notes: after it is changed will not modify the launch configuration on AWS. You must create a new config and assign it to the ASG instead." 
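# Reviewer note on the ec2_group change above: "foreign" security groups are
# recognised by the owner-id/group-id/name triple and wrapped in a local
# SecurityGroup object, since groups owned by another account cannot be
# fetched. The regex in isolation, fed with the value from the new
# documentation example:

import re

FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'
m = re.match(FOREIGN_SECURITY_GROUP_REGEX, '123412341234/sg-87654321/exact-name-of-sg')
owner_id, group_id, group_name = m.groups()
# -> ('123412341234', 'sg-87654321', 'exact-name-of-sg')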
version_added: "1.6" -author: Gareth Rushgrove +author: "Gareth Rushgrove (@garethr)" options: state: description: @@ -116,6 +116,18 @@ options: default: false aliases: [] version_added: "1.8" + classic_link_vpc_id: + description: + - Id of ClassicLink enabled VPC + required: false + default: null + version_added: "2.0" + classic_link_vpc_security_groups: + description: + - A list of security group id’s with which to associate the ClassicLink VPC instances. + required: false + default: null + version_added: "2.0" extends_documentation_fragment: aws """ @@ -126,6 +138,12 @@ EXAMPLES = ''' key_name: default security_groups: ['group', 'group2' ] instance_type: t1.micro + volumes: + - device_name: /dev/sda1 + volume_size: 100 + device_type: io1 + iops: 3000 + delete_on_termination: true ''' @@ -178,6 +196,8 @@ def create_launch_config(connection, module): ramdisk_id = module.params.get('ramdisk_id') instance_profile_name = module.params.get('instance_profile_name') ebs_optimized = module.params.get('ebs_optimized') + classic_link_vpc_id = module.params.get('classic_link_vpc_id') + classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') bdm = BlockDeviceMapping() if volumes: @@ -200,10 +220,12 @@ def create_launch_config(connection, module): kernel_id=kernel_id, spot_price=spot_price, instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, + associate_public_ip_address=assign_public_ip, ramdisk_id=ramdisk_id, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, + classic_link_vpc_security_groups=classic_link_vpc_security_groups, + classic_link_vpc_id=classic_link_vpc_id, ) launch_configs = connection.get_all_launch_configurations(names=[name]) @@ -219,7 +241,8 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) + security_groups=result.security_groups, instance_type=result.instance_type, + result=result) def delete_launch_config(connection, module): @@ -251,7 +274,9 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool') + assign_public_ip=dict(type='bool'), + classic_link_vpc_security_groups=dict(type='list'), + classic_link_vpc_id=dict(type='str') ) ) diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index aecd80ecdad..578a1af7297 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -21,7 +21,7 @@ description: - Can create or delete AWS metric alarms - Metrics you wish to alarm on must already exist version_added: "1.6" -author: Zacharie Eakin +author: "Zacharie Eakin (@zeekin)" options: state: description: diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 707f462ec90..10f03e9fc46 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -7,7 +7,7 @@ description: - Can create or delete scaling policies for autoscaling groups - Referenced autoscaling groups must already exist version_added: "1.6" -author: Zacharie Eakin +author: "Zacharie Eakin (@zeekin)" options: state: description: diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index ae3df140cce..ee9d5ab1110 100644 --- 
a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -75,7 +75,7 @@ options: required: false version_added: "1.9" -author: Will Thames +author: "Will Thames (@willthames)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py index f82e8ca782d..f79aea4cf25 100644 --- a/cloud/amazon/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -42,7 +42,7 @@ options: default: null aliases: ['aws_region', 'ec2_region'] -author: Lester Wade +author: "Lester Wade (@lwade)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 3065b550457..0d275cc91d7 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -107,7 +107,7 @@ options: default: present choices: ['absent', 'present', 'list'] version_added: "1.6" -author: Lester Wade +author: "Lester Wade (@lwade)" extends_documentation_fragment: aws ''' @@ -160,8 +160,8 @@ EXAMPLES = ''' instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf - with_items: ec2.instances - register: ec2_vol + with_items: ec2.instances + register: ec2_vol # Remove a volume - ec2_vol: @@ -239,15 +239,14 @@ def get_volumes(module, ec2): return vols def delete_volume(module, ec2): - vol = get_volume(module, ec2) - if not vol: - module.exit_json(changed=False) - else: - if vol.attachment_state() is not None: - adata = vol.attach_data - module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id)) - ec2.delete_volume(vol.id) - module.exit_json(changed=True) + volume_id = module.params['id'] + try: + ec2.delete_volume(volume_id) + module.exit_json(changed=True) + except boto.exception.EC2ResponseError as ec2_error: + if ec2_error.code == 'InvalidVolume.NotFound': + module.exit_json(changed=False) + module.fail_json(msg=ec2_error.message) def boto_supports_volume_encryption(): """ @@ -437,11 +436,11 @@ def main(): # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size - if not volume_size and not (id or name): - module.fail_json(msg="You must specify an existing volume with id or name or a volume_size") + if not volume_size and not (id or name or snapshot): + module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") - if volume_size and id: - module.fail_json(msg="Cannot specify volume_size and id") + if volume_size and (id or snapshot): + module.fail_json(msg="Cannot specify volume_size together with id or snapshot") if state == 'absent': delete_volume(module, ec2) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index 0870c14ec59..611251e307f 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -58,7 +58,7 @@ options: aliases: [] resource_tags: description: - - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' + - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. 
Prior to 1.7, specifying a resource tag was optional.' required: true default: null aliases: [] @@ -72,7 +72,7 @@ options: aliases: [] route_tables: description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' required: false default: null aliases: [] @@ -100,7 +100,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Carson Gee +author: "Carson Gee (@carsongee)" extends_documentation_fragment: aws ''' @@ -499,6 +499,9 @@ def create_vpc(module, vpc_conn): for rt in route_tables: try: new_rt = vpc_conn.create_route_table(vpc.id) + new_rt_tags = rt.get('resource_tags', None) + if new_rt_tags: + vpc_conn.create_tags(new_rt.id, new_rt_tags) for route in rt['routes']: route_kwargs = {} if route['gw'] == 'igw': diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py new file mode 100644 index 00000000000..2ee730f59cb --- /dev/null +++ b/cloud/amazon/ec2_vpc_net.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net +short_description: Configure AWS virtual private clouds +description: + - Create or terminate AWS virtual private clouds. This module has a dependency on python-boto. +version_added: "2.0" +author: Jonathan Davila (@defionscode) +options: + name: + description: + - The name to give your VPC. 
This is used in combination with the cidr_block parameter to determine if a VPC already exists.
+    required: yes
+  cidr_block:
+    description:
+      - The CIDR of the VPC
+    required: yes
+  tenancy:
+    description:
+      - Whether tenancy should be default or dedicated. This cannot be changed after the VPC has been created.
+    required: false
+    default: default
+    choices: [ 'default', 'dedicated' ]
+  dns_support:
+    description:
+      - Whether to enable AWS DNS support.
+    required: false
+    default: yes
+    choices: [ 'yes', 'no' ]
+  dns_hostnames:
+    description:
+      - Whether to enable AWS hostname support.
+    required: false
+    default: yes
+    choices: [ 'yes', 'no' ]
+  dhcp_opts_id:
+    description:
+      - The id of the DHCP options set to use for this VPC.
+    default: null
+    required: false
+  tags:
+    description:
+      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the VPC's Name tag when the two differ.
+    default: None
+    required: false
+    aliases: [ 'resource_tags' ]
+  state:
+    description:
+      - The state of the VPC. Either absent or present.
+    default: present
+    required: false
+    choices: [ 'present', 'absent' ]
+  multi_ok:
+    description:
+      - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
+    default: false
+    required: false
+
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a VPC with dedicated tenancy and a couple of tags
+
+- ec2_vpc_net:
+    name: Module_dev2
+    cidr_block: 10.10.0.0/16
+    region: us-east-1
+    tags:
+      module: ec2_vpc_net
+      this: works
+    tenancy: dedicated
+
+'''
+
+import time
+import sys
+
+try:
+    import boto
+    import boto.ec2
+    import boto.vpc
+    from boto.exception import BotoServerError
+    HAS_BOTO=True
+except ImportError:
+    HAS_BOTO=False
+
+def boto_exception(err):
+    '''generic error message handler'''
+    if hasattr(err, 'error_message'):
+        error = err.error_message
+    elif hasattr(err, 'message'):
+        error = err.message
+    else:
+        error = '%s: %s' % (Exception, err)
+
+    return error
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+    """Returns a matching VPC object when one exists, otherwise None. When
+    supplied with a CIDR, it checks for matching tags to determine a match;
+    otherwise it assumes the VPC does not exist and returns None.
+    """
+    matched_vpc = None
+
+    try:
+        matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
+    except Exception, e:
+        e_msg=boto_exception(e)
+        module.fail_json(msg=e_msg)
+
+    if len(matching_vpcs) == 1:
+        matched_vpc = matching_vpcs[0]
+    elif len(matching_vpcs) > 1:
+        if not multi:
+            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+                                 'CIDR block you specified. If you would like to create '
+                                 'the VPC anyway please pass True to the multi_ok param.'
% len(matching_vpcs)) + + return matched_vpc + + +def update_vpc_tags(vpc, module, vpc_obj, tags, name): + + if tags is None: + tags = dict() + + tags.update({'Name': name}) + try: + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) + if cmp(tags, current_tags): + vpc.create_tags(vpc_obj.id, tags) + return True + else: + return False + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + + if vpc_obj.dhcp_options_id != dhcp_id: + connection.associate_dhcp_options(dhcp_id, vpc_obj.id) + return True + else: + return False + +def get_vpc_values(vpc_obj): + + if vpc_obj is not None: + vpc_values = vpc_obj.__dict__ + if "region" in vpc_values: + vpc_values.pop("region") + if "item" in vpc_values: + vpc_values.pop("item") + if "connection" in vpc_values: + vpc_values.pop("connection") + return vpc_values + else: + return None + +def main(): + argument_spec=ec2_argument_spec() + argument_spec.update(dict( + name = dict(type='str', default=None, required=True), + cidr_block = dict(type='str', default=None, required=True), + tenancy = dict(choices=['default', 'dedicated'], default='default'), + dns_support = dict(type='bool', default=True), + dns_hostnames = dict(type='bool', default=True), + dhcp_opts_id = dict(type='str', default=None, required=False), + tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']), + state = dict(choices=['present', 'absent'], default='present'), + multi_ok = dict(type='bool', default=False) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto is required for this module') + + name=module.params.get('name') + cidr_block=module.params.get('cidr_block') + tenancy=module.params.get('tenancy') + dns_support=module.params.get('dns_support') + dns_hostnames=module.params.get('dns_hostnames') + dhcp_id=module.params.get('dhcp_opts_id') + tags=module.params.get('tags') + state=module.params.get('state') + multi=module.params.get('multi_ok') + + changed=False + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + if dns_hostnames and not dns_support: + module.fail_json('In order to enable DNS Hostnames you must also enable DNS support') + + if state == 'present': + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is None: + try: + vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if dhcp_id is not None: + try: + if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if tags is not None or name is not None: + try: + if update_vpc_tags(connection, module, vpc_obj, tags, name): + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. 
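# Reviewer note: EC2's ModifyVpcAttribute API accepts a single attribute per
# call, which is why DNS support and DNS hostnames are toggled in two
# separate calls below. Standalone equivalent (a sketch; the helper name is
# ours and `conn` is a boto.vpc.VPCConnection):

def set_vpc_dns(conn, vpc_id, support=True, hostnames=True):
    conn.modify_vpc_attribute(vpc_id, enable_dns_support=support)
    conn.modify_vpc_attribute(vpc_id, enable_dns_hostnames=hostnames)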
+ try: + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support) + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames) + except BotoServerError, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + # get the vpc obj again in case it has changed + try: + vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] + except BotoServerError, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) + + elif state == 'absent': + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is not None: + try: + connection.delete_vpc(vpc_obj.id) + vpc_obj = None + changed = True + except BotoServerError, e: + e_msg = boto_exception(e) + module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " + "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 5e9c2cc0e12..deea0940995 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -22,7 +22,7 @@ description: - Manage cache clusters in Amazon Elasticache. - Returns information about the specified cache cluster. version_added: "1.4" -author: Jim Dalton +author: "Jim Dalton (@jsdalton)" options: state: description: @@ -42,7 +42,7 @@ options: description: - The version number of the cache engine required: false - default: 1.4.14 + default: none node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -485,11 +485,11 @@ def main(): state={'required': True, 'choices': ['present', 'absent', 'rebooted']}, name={'required': True}, engine={'required': False, 'default': 'memcached'}, - cache_engine_version={'required': False, 'default': '1.4.14'}, + cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, - cache_port={'required': False, 'default': 11211, 'type': 'int'}, parameter_group={'required': False, 'default': None}, + cache_port={'required': False, 'type': 'int'}, cache_subnet_group={'required': False, 'default': None}, cache_security_groups={'required': False, 'default': [default], 'type': 'list'}, diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py new file mode 100644 index 00000000000..4ea7e8aba16 --- /dev/null +++ b/cloud/amazon/elasticache_subnet_group.py @@ -0,0 +1,157 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
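# Reviewer note on the ec2_vpc_net failure hint above: DeleteVpc fails while
# subnets (and gateways, route tables, etc.) still reference the VPC. A
# minimal teardown sketch with plain boto, assuming only subnets remain
# (placeholder region; real playbooks should use the modules named in the
# error message):

import boto.vpc

def teardown_vpc(vpc_id, region='us-east-1'):
    conn = boto.vpc.connect_to_region(region)
    for subnet in conn.get_all_subnets(filters={'vpc-id': vpc_id}):
        conn.delete_subnet(subnet.id)
    conn.delete_vpc(vpc_id)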
+ +DOCUMENTATION = ''' +--- +module: elasticache_subnet_group +version_added: "2.0" +short_description: manage Elasticache subnet groups +description: + - Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5. +options: + state: + description: + - Specifies whether the subnet should be present or absent. + required: true + default: present + choices: [ 'present' , 'absent' ] + name: + description: + - Database subnet group identifier. + required: true + description: + description: + - Elasticache subnet group description. Only set when a new group is added. + required: false + default: null + subnets: + description: + - List of subnet IDs that make up the Elasticache subnet group. + required: false + default: null + region: + description: + - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. + required: true + aliases: ['aws_region', 'ec2_region'] +author: "Tim Mahoney (@timmahoney)" +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Add or change a subnet group +- elasticache_subnet_group + state: present + name: norwegian-blue + description: My Fancy Ex Parrot Subnet Group + subnets: + - subnet-aaaaaaaa + - subnet-bbbbbbbb + +# Remove a subnet group +- elasticache_subnet_group: + state: absent + name: norwegian-blue +''' + +try: + import boto + from boto.elasticache.layer1 import ElastiCacheConnection + from boto.regioninfo import RegionInfo + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state = dict(required=True, choices=['present', 'absent']), + name = dict(required=True), + description = dict(required=False), + subnets = dict(required=False, type='list'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + state = module.params.get('state') + group_name = module.params.get('name').lower() + group_description = module.params.get('description') + group_subnets = module.params.get('subnets') or {} + + if state == 'present': + for required in ['name', 'description', 'subnets']: + if not module.params.get(required): + module.fail_json(msg = str("Parameter %s required for state='present'" % required)) + else: + for not_allowed in ['description', 'subnets']: + if module.params.get(not_allowed): + module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) + + # Retrieve any AWS settings from the environment. 
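# Reviewer note: ElastiCache signals a missing subnet group with a
# CacheSubnetGroupNotFoundFault error rather than an empty result, which is
# why the existence probe below lives in a try/except. The pattern in
# isolation (a sketch; `conn` is a boto ElastiCacheConnection and the helper
# name is ours):

from boto.exception import BotoServerError

def subnet_group_exists(conn, group_name):
    try:
        conn.describe_cache_subnet_groups(group_name)
        return True
    except BotoServerError, e:
        if e.error_code != 'CacheSubnetGroupNotFoundFault':
            raise
        return False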
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + if not region: + module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) + + + """Get an elasticache connection""" + try: + endpoint = "elasticache.%s.amazonaws.com" % region + connect_region = RegionInfo(name=region, endpoint=endpoint) + conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=e.message) + + try: + changed = False + exists = False + + try: + matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) + exists = len(matching_groups) > 0 + except BotoServerError, e: + if e.error_code != 'CacheSubnetGroupNotFoundFault': + module.fail_json(msg = e.error_message) + + if state == 'absent': + if exists: + conn.delete_cache_subnet_group(group_name) + changed = True + else: + if not exists: + new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + else: + changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + + except BotoServerError, e: + if e.error_message != 'No modifications were requested.': + module.fail_json(msg = e.error_message) + else: + changed = False + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py new file mode 100644 index 00000000000..df8f3423411 --- /dev/null +++ b/cloud/amazon/iam.py @@ -0,0 +1,714 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: iam +short_description: Manage IAM users, groups, roles and keys +description: + - Allows for the management of IAM users, groups, roles and access keys. +version_added: "2.0" +options: + iam_type: + description: + - Type of IAM resource + required: true + default: null + choices: [ "user", "group", "role"] + name: + description: + - Name of IAM resource to create or identify + required: true + new_name: + description: + - When state is update, will replace name with new_name on IAM resource + required: false + default: null + new_path: + description: + - When state is update, will replace the path with new_path on the IAM resource + required: false + default: null + state: + description: + - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. + required: true + default: null + choices: [ "present", "absent", "update" ] + path: + description: + - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. 
+    required: false
+    default: "/"
+  access_key_state:
+    description:
+      - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
+    required: false
+    default: null
+    choices: [ "create", "remove", "active", "inactive"]
+  key_count:
+    description:
+      - When access_key_state is create, it will ensure this many keys are present. Defaults to 1.
+    required: false
+    default: '1'
+  access_key_ids:
+    description:
+      - A list of the keys that you want impacted by the access_key_state parameter.
+  groups:
+    description:
+      - A list of groups the user should belong to. When update, will gracefully remove groups not listed.
+    required: false
+    default: null
+  password:
+    description:
+      - When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
+    required: false
+    default: null
+  update_password:
+    required: false
+    default: always
+    choices: ['always', 'on_create']
+    description:
+      - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_secret_key', 'secret_key' ]
+  aws_access_key:
+    description:
+      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+notes:
+  - 'Currently boto does not support the removal of Managed Policies; the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
+author:
+  - "Jonathan I. Davila (@defionscode)"
+  - "Paul Seiffert (@seiffert)"
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic user creation example
+tasks:
+- name: Create two new IAM users with API keys
+  iam:
+    iam_type: user
+    name: "{{ item }}"
+    state: present
+    password: "{{ temp_pass }}"
+    access_key_state: create
+  with_items:
+    - jcleese
+    - mpython
+
+# Advanced example, create two new groups and add the pre-existing user
+# jdavila to both groups.
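# Reviewer note: key_count is satisfied by looping until the requested number
# of access keys exists (see create_user/update_user below). The equivalent
# standalone loop against boto's IAMConnection (a sketch; `iam` is a
# connected IAMConnection and the helper name is ours):

def ensure_key_count(iam, name, key_count):
    current = iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata
    for _ in range(max(0, key_count - len(current))):
        iam.create_access_key(user_name=name)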
+task: +- name: Create Two Groups, Mario and Luigi + iam: + iam_type: group + name: "{{ item }}" + state: present + with_items: + - Mario + - Luigi + register: new_groups + +- name: + iam: + iam_type: user + name: jdavila + state: update + groups: "{{ item.created_group.group_name }}" + with_items: new_groups.results + +''' + +import json +import itertools +import sys +try: + import boto + import boto.iam + import boto.ec2 + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + + +def create_user(module, iam, name, pwd, path, key_state, key_count): + key_qty = 0 + keys = [] + try: + user_meta = iam.create_user( + name, path).create_user_response.create_user_result.user + changed = True + if pwd is not None: + pwd = iam.create_login_profile(name, pwd) + if key_state in ['create']: + if key_count: + while key_count > key_qty: + keys.append(iam.create_access_key( + user_name=name).create_access_key_response.\ + create_access_key_result.\ + access_key) + key_qty += 1 + else: + keys = None + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) + return (user_info, changed) + + +def delete_user(module, iam, name): + try: + current_keys = [ck['access_key_id'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + for key in current_keys: + iam.delete_access_key(key, name) + del_meta = iam.delete_user(name).delete_user_response + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: + iam.delete_user_policy(name, policy) + try: + del_meta = iam.delete_user(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." 
% name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + return del_meta, name, changed + else: + changed = True + return del_meta, name, changed + + +def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): + changed = False + name_change = False + if updated and new_name: + name = new_name + try: + current_keys, status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + key_qty = len(current_keys) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found' in error_msg and updated: + current_keys, status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] + name = new_name + else: + module.fail_json(changed=False, msg=str(err)) + + updated_key_list = {} + + if new_name or new_path: + c_path = iam.get_user(name).get_user_result.user['path'] + if (name != new_name) or (c_path != new_path): + changed = True + try: + if not updated: + user = iam.update_user( + name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata + else: + user = iam.update_user( + name, new_path=new_path).update_user_response.response_metadata + user['updates'] = dict( + old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=False, msg=str(err)) + else: + if not updated: + name_change = True + + if pwd: + try: + iam.update_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError: + try: + iam.create_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(str(err)) + if 'Password does not conform to the account password policy' in error_msg: + module.fail_json(changed=False, msg="Passsword doesn't conform to policy") + else: + module.fail_json(msg=error_msg) + else: + try: + iam.delete_login_profile(name) + changed = True + except boto.exception.BotoServerError: + pass + + if key_state == 'create': + try: + while key_count > key_qty: + new_key = iam.create_access_key( + user_name=name).create_access_key_response.create_access_key_result.access_key + key_qty += 1 + changed = True + + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + + if keys and key_state: + for access_key in keys: + if access_key in current_keys: + for current_key, current_key_state in zip(current_keys, status): + if key_state != current_key_state.lower(): + try: + iam.update_access_key( + access_key, key_state.capitalize(), user_name=name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + changed = True + + if key_state == 'remove': + try: + iam.delete_access_key(access_key, user_name=name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + changed = True + + try: + final_keys, final_key_status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. + access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. 
+ access_key_metadata] + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + + for fk, fks in zip(final_keys, final_key_status): + updated_key_list.update({fk: fks}) + + return name_change, updated_key_list, changed + + +def set_users_groups(module, iam, name, groups, updated=None, +new_name=None): + """ Sets groups for a user, will purge groups not explictly passed, while + retaining pre-existing groups that also are in the new list. + """ + changed = False + + if updated: + name = new_name + + try: + orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user( + name).list_groups_for_user_result.groups] + remove_groups = [ + rg for rg in frozenset(orig_users_groups).difference(groups)] + new_groups = [ + ng for ng in frozenset(groups).difference(orig_users_groups)] + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + if len(orig_users_groups) > 0: + for new in new_groups: + iam.add_user_to_group(new, name) + for rm in remove_groups: + iam.remove_user_from_group(rm, name) + else: + for group in groups: + try: + iam.add_user_to_group(group, name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('The group with name %s cannot be found.' % group) in error_msg: + module.fail_json(changed=False, msg="Group %s doesn't exist" % group) + + + if len(remove_groups) > 0 or len(new_groups) > 0: + changed = True + + return (groups, changed) + + +def create_group(module=None, iam=None, name=None, path=None): + changed = False + try: + iam.create_group( + name, path).create_group_response.create_group_result.group + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + return name, changed + + +def delete_group(module=None, iam=None, name=None): + changed = False + try: + iam.delete_group(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: + iam.delete_group_policy(name, policy) + try: + iam.delete_group(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." 
% name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + else: + changed = True + return changed, name + +def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): + changed = False + try: + current_group_path = iam.get_group( + name).get_group_response.get_group_result.group['path'] + if new_path: + if current_group_path != new_path: + iam.update_group(name, new_path=new_path) + changed = True + if new_name: + if name != new_name: + iam.update_group(name, new_group_name=new_name, new_path=new_path) + changed = True + name = new_name + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + + return changed, name, new_path, current_group_path + + +def create_role(module, iam, name, path, role_list, prof_list): + changed = False + try: + if name not in role_list: + changed = True + iam.create_role( + name, path=path).create_role_response.create_role_result.role.role_name + + if name not in prof_list: + iam.create_instance_profile(name, path=path) + iam.add_role_to_instance_profile(name, name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. + list_roles_result.roles] + return changed, updated_role_list + + +def delete_role(module, iam, name, role_list, prof_list): + changed = False + try: + if name in role_list: + cur_ins_prof = [rp['instance_profile_name'] for rp in + iam.list_instance_profiles_for_role(name). + list_instance_profiles_for_role_result. + instance_profiles] + for profile in cur_ins_prof: + iam.remove_role_from_instance_profile(profile, name) + try: + iam.delete_role(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: + iam.delete_role_policy(name, policy) + try: + iam.delete_role(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." % name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + + else: + changed = True + + for prof in prof_list: + if name == prof: + iam.delete_instance_profile(name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. 
+                                                list_roles_result.roles]
+        return changed, updated_role_list
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        iam_type=dict(
+            default=None, required=True, choices=['user', 'group', 'role']),
+        groups=dict(type='list', default=None, required=False),
+        state=dict(
+            default=None, required=True, choices=['present', 'absent', 'update']),
+        password=dict(default=None, required=False, no_log=True),
+        update_password=dict(default='always', required=False, choices=['always', 'on_create']),
+        access_key_state=dict(default=None, required=False, choices=[
+            'active', 'inactive', 'create', 'remove',
+            'Active', 'Inactive', 'Create', 'Remove']),
+        access_key_ids=dict(type='list', default=None, required=False),
+        key_count=dict(type='int', default=1, required=False),
+        name=dict(default=None, required=False),
+        new_name=dict(default=None, required=False),
+        path=dict(default='/', required=False),
+        new_path=dict(default=None, required=False)
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[],
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='This module requires boto, please install it')
+
+    state = module.params.get('state').lower()
+    iam_type = module.params.get('iam_type').lower()
+    groups = module.params.get('groups')
+    name = module.params.get('name')
+    new_name = module.params.get('new_name')
+    password = module.params.get('password')
+    update_pw = module.params.get('update_password')
+    path = module.params.get('path')
+    new_path = module.params.get('new_path')
+    key_count = module.params.get('key_count')
+    key_state = module.params.get('access_key_state')
+    # key_ids must be read before the key_state check below references it
+    key_ids = module.params.get('access_key_ids')
+    if key_state:
+        key_state = key_state.lower()
+        if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
+            module.fail_json(changed=False, msg="At least one access key has to be defined in order"
+                                                " to use 'active' or 'inactive'")
+
+    if iam_type == 'user' and module.params.get('password') is not None:
+        pwd = module.params.get('password')
+    elif iam_type != 'user' and module.params.get('password') is not None:
+        module.fail_json(msg="a password is being specified when the iam_type "
+                             "is not user. Check parameters")
+    else:
+        pwd = None
+
+    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
+                               module.params.get('access_key_ids') is not None):
+        module.fail_json(msg="the IAM type must be user when IAM access keys "
+                             "are being modified. Check parameters")
+
+    if iam_type == 'role' and state == 'update':
+        module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
+                                            "please specify present or absent")
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+    try:
+        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+    except boto.exception.NoAuthHandlerFound, e:
+        module.fail_json(msg=str(e))
+
+    result = {}
+    changed = False
+
+    orig_group_list = [gl['group_name'] for gl in iam.get_all_groups().
+                                                      list_groups_result.
+                                                      groups]
+    orig_user_list = [ul['user_name'] for ul in iam.get_all_users().
+                                                    list_users_result.
+                                                    users]
+    orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
+                                                    list_roles_result.
+                                                    roles]
+    orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles().
+                                                                list_instance_profiles_response.
+                                                                list_instance_profiles_result.
+ instance_profiles] + + if iam_type == 'user': + been_updated = False + user_groups = None + user_exists = any([n in [name, new_name] for n in orig_user_list]) + if user_exists: + current_path = iam.get_user(name).get_user_result.user['path'] + if not new_path and current_path != path: + new_path = path + path = current_path + + if state == 'present' and not user_exists and not new_name: + (meta, changed) = create_user( + module, iam, name, password, path, key_state, key_count) + keys = iam.get_all_access_keys(name).list_access_keys_result.\ + access_key_metadata + if groups: + (user_groups, changed) = set_users_groups( + module, iam, name, groups, been_updated, new_name) + module.exit_json( + user_meta=meta, groups=user_groups, keys=keys, changed=changed) + + elif state in ['present', 'update'] and user_exists: + if update_pw == 'on_create': + password = None + if name not in orig_user_list and new_name in orig_user_list: + been_updated = True + name_change, key_list, user_changed = update_user( + module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated) + if name_change and new_name: + orig_name = name + name = new_name + if groups: + user_groups, groups_changed = set_users_groups( + module, iam, name, groups, been_updated, new_name) + if groups_changed == user_changed: + changed = groups_changed + else: + changed = True + else: + changed = user_changed + if new_name and new_path: + module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name, + new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list) + elif new_name and not new_path and not been_updated: + module.exit_json( + changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list) + elif new_name and not new_path and been_updated: + module.exit_json( + changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state) + elif not new_name and new_path: + module.exit_json( + changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list) + else: + module.exit_json( + changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: + module.fail_json( + msg="The user %s does not exit. No update made." 
diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py
new file mode 100644
index 00000000000..1f58be753c8
--- /dev/null
+++ b/cloud/amazon/iam_cert.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: iam_cert
+short_description: Manage server certificates for use on ELBs and CloudFront
+description:
+    - Allows for the management of server certificates
+version_added: "2.0"
+options:
+  name:
+    description:
+      - Name of certificate to add, update or remove.
+    required: true
+    aliases: []
+  new_name:
+    description:
+      - When present, this will update the name of the cert with the value passed here.
+    required: false
+    aliases: []
+  new_path:
+    description:
+      - When present, this will update the path of the cert with the value passed here.
+    required: false
+    aliases: []
+  state:
+    description:
+      - Whether to create or delete the certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
+    required: true
+    default: null
+    choices: [ "present", "absent" ]
+    aliases: []
+  path:
+    description:
+      - When creating or updating, specify the desired path of the certificate
+    required: false
+    default: "/"
+    aliases: []
+  cert_chain:
+    description:
+      - The path to the CA certificate chain in PEM encoded format.
+    required: false
+    default: null
+    aliases: []
+  cert:
+    description:
+      - The path to the certificate body in PEM encoded format.
+    required: false
+    aliases: []
+  key:
+    description:
+      - The path to the private key of the certificate in PEM encoded format.
+  dup_ok:
+    description:
+      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
+    required: false
+    default: False
+    aliases: []
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_secret_key', 'secret_key' ]
+  aws_access_key:
+    description:
+      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+
+
+requirements: [ "boto" ]
+author: Jonathan I. Davila
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic server certificate upload
+tasks:
+- name: Upload Certificate
+  iam_cert:
+    name: very_ssl
+    state: present
+    cert: somecert.pem
+    key: privcertkey
+    cert_chain: myverytrustedchain
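+
+# A hedged sketch of the update and removal paths; names and file paths are
+# illustrative only. Renaming still requires cert/key so the module can match
+# the existing body before updating:
+- name: Rename the uploaded certificate
+  iam_cert:
+    name: very_ssl
+    new_name: very_ssl_renamed
+    state: present
+    cert: somecert.pem
+    key: privcertkey
+
+- name: Remove the certificate
+  iam_cert:
+    name: very_ssl_renamed
+    state: absent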
+
+'''
+import json
+import sys
+try:
+    import boto
+    import boto.iam
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+def boto_exception(err):
+    '''generic error message handler'''
+    if hasattr(err, 'error_message'):
+        error = err.error_message
+    elif hasattr(err, 'message'):
+        error = err.message
+    else:
+        error = '%s: %s' % (Exception, err)
+
+    return error
+
+def cert_meta(iam, name):
+    # fetch the certificate once and reuse the response; this replaces five
+    # identical get_server_certificate API calls with one
+    cert = iam.get_server_certificate(name).get_server_certificate_result.\
+        server_certificate
+    opath = cert.server_certificate_metadata.path
+    ocert = cert.certificate_body
+    ocert_id = cert.server_certificate_metadata.server_certificate_id
+    upload_date = cert.server_certificate_metadata.upload_date
+    exp = cert.server_certificate_metadata.expiration
+    return opath, ocert, ocert_id, upload_date, exp
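+
+# dup_check decides what to do with an incoming certificate: it returns True
+# when name/new_name already refer to a cert with an identical body (update
+# metadata rather than re-upload), fails when the same name carries a
+# different body, and fails on a duplicate body under another name unless
+# dup_ok is set.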
+
+def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
+    update = False
+    if any(ct in orig_cert_names for ct in [name, new_name]):
+        for i_name in [name, new_name]:
+            if i_name is None:
+                continue
+
+            if cert is not None:
+                try:
+                    c_index = orig_cert_names.index(i_name)
+                except ValueError:
+                    # list.index() raises ValueError, not NameError, when the
+                    # name is absent
+                    continue
+                else:
+                    if orig_cert_bodies[c_index] == cert:
+                        update = True
+                        break
+                    elif orig_cert_bodies[c_index] != cert:
+                        module.fail_json(changed=False,
+                                         msg='A cert with the name %s already exists and'
+                                             ' has a different certificate body associated'
+                                             ' with it. Certificates cannot have the same name.' % i_name)
+            else:
+                update = True
+                break
+    elif cert in orig_cert_bodies and not dup_ok:
+        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
+            if crt_body == cert:
+                module.fail_json(changed=False, msg='This certificate already'
+                                 ' exists under the name %s' % crt_name)
+
+    return update
+
+
+def cert_action(module, iam, name, cpath, new_name, new_path, state,
+                cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
+    if state == 'present':
+        update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
+                           orig_cert_bodies, dup_ok)
+        if update:
+            opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
+            changed = True
+            if new_name and new_path:
+                iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
+                module.exit_json(changed=changed, original_name=name, new_name=new_name,
+                                 original_path=opath, new_path=new_path, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp)
+            elif new_name and not new_path:
+                iam.update_server_cert(name, new_cert_name=new_name)
+                module.exit_json(changed=changed, original_name=name, new_name=new_name,
+                                 cert_path=opath, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp)
+            elif not new_name and new_path:
+                iam.update_server_cert(name, new_path=new_path)
+                # report the unchanged name here; new_name is None in this branch
+                module.exit_json(changed=changed, name=name,
+                                 original_path=opath, new_path=new_path, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp)
+            else:
+                changed = False
+                module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+                                 upload_date=upload_date, expiration_date=exp,
+                                 msg='No new path or name specified. No changes made')
+        else:
+            changed = True
+            iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
+            opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
+            module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
+                             upload_date=upload_date, expiration_date=exp)
+    elif state == 'absent':
+        if name in orig_cert_names:
+            changed = True
+            iam.delete_server_cert(name)
+            module.exit_json(changed=changed, deleted_cert=name)
+        else:
+            changed = False
+            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(
+            default=None, required=True, choices=['present', 'absent']),
+        name=dict(default=None, required=False),
+        cert=dict(default=None, required=False),
+        key=dict(default=None, required=False),
+        cert_chain=dict(default=None, required=False),
+        new_name=dict(default=None, required=False),
+        path=dict(default='/', required=False),
+        new_path=dict(default=None, required=False),
+        dup_ok=dict(type='bool', default=False, required=False)
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[],
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg="Boto is required for this module")
+
+    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+
+    try:
+        iam = boto.iam.connection.IAMConnection(
+            aws_access_key_id=aws_access_key,
+            aws_secret_access_key=aws_secret_key,
+        )
+    except boto.exception.NoAuthHandlerFound, e:
+        module.fail_json(msg=str(e))
+
+    state = module.params.get('state')
+    name = module.params.get('name')
+    path = module.params.get('path')
+    new_name = module.params.get('new_name')
+    new_path = module.params.get('new_path')
+    cert_chain = module.params.get('cert_chain')
+    dup_ok = module.params.get('dup_ok')
+    if state == 'present':
+        cert = open(module.params.get('cert'), 'r').read().rstrip()
+        key = open(module.params.get('key'), 'r').read().rstrip()
+        if cert_chain is not None:
+            cert_chain = open(module.params.get('cert_chain'), 'r').read()
+    else:
+        # null cert_chain itself here; the previous code nulled an unused
+        # 'chain' variable and left cert_chain holding a file path
+        key = cert = cert_chain = None
+
+    orig_certs = [ctb['server_certificate_name'] for ctb in \
+                  iam.get_all_server_certs().\
+                  list_server_certificates_result.\
+                  server_certificate_metadata_list]
+    orig_bodies = [iam.get_server_certificate(thing).\
+                   get_server_certificate_result.\
+                   certificate_body \
+                   for thing in orig_certs]
+    if new_name == name:
+        new_name = None
+    if new_path == path:
+        new_path = None
+
+    changed = False
+    try:
+        cert_action(module, iam, name, path, new_name, new_path, state,
+                    cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
+    except boto.exception.BotoServerError, err:
+        # do not echo the certificate body or private key back in failure output
+        module.fail_json(changed=changed, msg=str(err))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
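Because dup_ok only relaxes the duplicate-body check, re-uploading the same PEM under a second name needs it set explicitly. A hedged sketch, with paths and names illustrative rather than part of this patch:

    - name: Upload the same certificate under a second name
      iam_cert:
        name: very_ssl_copy
        state: present
        cert: somecert.pem
        key: privcertkey
        dup_ok: yes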
diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py
new file mode 100644
index 00000000000..9213d1585b0
--- /dev/null
+++ b/cloud/amazon/iam_policy.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: iam_policy
+short_description: Manage IAM policies for users, groups, and roles
+description:
+    - Allows uploading or removing IAM policies for IAM users, groups or roles.
+version_added: "2.0"
+options:
+  iam_type:
+    description:
+      - Type of IAM resource
+    required: true
+    default: null
+    choices: [ "user", "group", "role"]
+    aliases: []
+  iam_name:
+    description:
+      - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
+    required: true
+    aliases: []
+  policy_name:
+    description:
+      - The name label for the policy to create or remove.
+    required: false
+    aliases: []
+  policy_document:
+    description:
+      - The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
+    required: false
+    aliases: []
+  policy_json:
+    description:
+      - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
+    required: false
+    aliases: []
+  state:
+    description:
+      - Whether to create or delete the IAM policy.
+    required: true
+    default: null
+    choices: [ "present", "absent"]
+    aliases: []
+  skip_duplicates:
+    description:
+      - By default the module looks for any policies that match the document you pass in; if there is a match it will not make a new policy object with the same rules. You can override this by specifying false, which would allow for two policy objects with different names but the same rules.
+    required: false
+    default: True
+    aliases: []
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_secret_key', 'secret_key' ]
+  aws_access_key:
+    description:
+      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+
+requirements: [ "boto" ]
+notes:
+  - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
+author: "Jonathan I. Davila (@defionscode)"
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Create a policy with the name 'Admin' for the group 'administrators'
+tasks:
+- name: Assign the Admin policy to the administrators group
+  iam_policy:
+    iam_type: group
+    iam_name: administrators
+    policy_name: Admin
+    state: present
+    policy_document: admin_policy.json
+
+# Advanced example, create two new groups and add a READ-ONLY policy to both
+# groups.
+tasks:
+- name: Create Two Groups, Mario and Luigi
+  iam:
+    iam_type: group
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - Mario
+    - Luigi
+  register: new_groups
+
+- name: Apply the READ-ONLY policy to the new groups
+  iam_policy:
+    iam_type: group
+    iam_name: "{{ item.created_group.group_name }}"
+    policy_name: "READ-ONLY"
+    policy_document: readonlypolicy.json
+    state: present
+  with_items: new_groups.results
+
+# Create a new S3 policy with prefix per user
+tasks:
+- name: Create S3 policy from template
+  iam_policy:
+    iam_type: user
+    iam_name: "{{ item.user }}"
+    policy_name: "s3_limited_access_{{ item.prefix }}"
+    state: present
+    policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
+  with_items:
+    - user: s3_user
+      prefix: s3_user_prefix
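+
+# A sketch of the removal path (names illustrative, not part of this patch):
+- name: Detach the Admin policy from the administrators group
+  iam_policy:
+    iam_type: group
+    iam_name: administrators
+    policy_name: Admin
+    state: absent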
+
+'''
+import json
+import urllib
+try:
+    import boto
+    import boto.iam
+    import boto.ec2
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+def boto_exception(err):
+    '''generic error message handler'''
+    if hasattr(err, 'error_message'):
+        error = err.error_message
+    elif hasattr(err, 'message'):
+        error = err.message
+    else:
+        error = '%s: %s' % (Exception, err)
+
+    return error
+
+
+def user_action(module, iam, name, policy_name, skip, pdoc, state):
+    policy_match = False
+    changed = False
+    try:
+        current_policies = [cp for cp in iam.get_all_user_policies(name).
+                            list_user_policies_result.
+                            policy_names]
+        for pol in current_policies:
+            # boto returns url-encoded policy documents, so unquote before
+            # comparing against the incoming document
+            if urllib.unquote(iam.get_user_policy(name, pol).
+                              get_user_policy_result.policy_document) == pdoc:
+                policy_match = True
+                if policy_match:
+                    msg = ("The policy document you specified already exists "
+                           "under the name %s." % pol)
+        if state == 'present' and skip:
+            if policy_name not in current_policies and not policy_match:
+                changed = True
+                iam.put_user_policy(name, policy_name, pdoc)
+        elif state == 'present' and not skip:
+            changed = True
+            iam.put_user_policy(name, policy_name, pdoc)
+        elif state == 'absent':
+            try:
+                iam.delete_user_policy(name, policy_name)
+                changed = True
+            except boto.exception.BotoServerError, err:
+                error_msg = boto_exception(err)
+                if 'cannot be found.' in error_msg:
+                    changed = False
+                    module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
+
+        updated_policies = [cp for cp in iam.get_all_user_policies(name).
+                            list_user_policies_result.
+ policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies + + +def role_action(module, iam, name, policy_name, skip, pdoc, state): + policy_match = False + changed = False + try: + current_policies = [cp for cp in iam.list_role_policies(name). + list_role_policies_result. + policy_names] + for pol in current_policies: + if urllib.unquote(iam.get_role_policy(name, pol). + get_role_policy_result.policy_document) == pdoc: + policy_match = True + if policy_match: + msg=("The policy document you specified already exists " + "under the name %s." % pol) + if state == 'present' and skip: + if policy_name not in current_policies and not policy_match: + changed = True + iam.put_role_policy(name, policy_name, pdoc) + elif state == 'present' and not skip: + changed = True + iam.put_role_policy(name, policy_name, pdoc) + elif state == 'absent': + try: + iam.delete_role_policy(name, policy_name) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found.' in error_msg: + changed = False + module.exit_json(changed=changed, + msg="%s policy is already absent" % policy_name) + + updated_policies = [cp for cp in iam.list_role_policies(name). + list_role_policies_result. + policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies + + +def group_action(module, iam, name, policy_name, skip, pdoc, state): + policy_match = False + changed = False + msg='' + try: + current_policies = [cp for cp in iam.get_all_group_policies(name). + list_group_policies_result. + policy_names] + for pol in current_policies: + if urllib.unquote(iam.get_group_policy(name, pol). + get_group_policy_result.policy_document) == pdoc: + policy_match = True + if policy_match: + msg=("The policy document you specified already exists " + "under the name %s." % pol) + if state == 'present' and skip: + if policy_name not in current_policies and not policy_match: + changed = True + iam.put_group_policy(name, policy_name, pdoc) + elif state == 'present' and not skip: + changed = True + iam.put_group_policy(name, policy_name, pdoc) + elif state == 'absent': + try: + iam.delete_group_policy(name, policy_name) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found.' in error_msg: + changed = False + module.exit_json(changed=changed, + msg="%s policy is already absent" % policy_name) + + updated_policies = [cp for cp in iam.get_all_group_policies(name). + list_group_policies_result. 
+ policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies, msg + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + iam_type=dict( + default=None, required=True, choices=['user', 'group', 'role']), + state=dict( + default=None, required=True, choices=['present', 'absent']), + iam_name=dict(default=None, required=False), + policy_name=dict(default=None, required=True), + policy_document=dict(default=None, required=False), + policy_json=dict(type='str', default=None, required=False), + skip_duplicates=dict(type='bool', default=True, required=False) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + state = module.params.get('state').lower() + iam_type = module.params.get('iam_type').lower() + state = module.params.get('state') + name = module.params.get('iam_name') + policy_name = module.params.get('policy_name') + skip = module.params.get('skip_duplicates') + + if module.params.get('policy_document') != None and module.params.get('policy_json') != None: + module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set') + + if module.params.get('policy_document') != None: + with open(module.params.get('policy_document'), 'r') as json_data: + pdoc = json.dumps(json.load(json_data)) + json_data.close() + elif module.params.get('policy_json') != None: + try: + pdoc = json.dumps(json.loads(module.params.get('policy_json'))) + except Exception as e: + module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json')) + else: + pdoc=None + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + try: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + changed = False + + if iam_type == 'user': + changed, user_name, current_policies = user_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, user_name=name, policies=current_policies) + elif iam_type == 'role': + changed, role_name, current_policies = role_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, role_name=name, policies=current_policies) + elif iam_type == 'group': + changed, group_name, current_policies, msg = group_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py old mode 100755 new mode 100644 index 34f45218f0a..9e98f50230b --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -24,147 +24,123 @@ description: options: command: description: - - Specifies the action to take. + - Specifies the action to take. required: true - default: null - aliases: [] - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] + choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot required: false default: null - aliases: [] source_instance: description: - Name of the database to replicate. 
Used only when command=replicate. required: false default: null - aliases: [] db_engine: description: - - The type of database. Used only when command=create. + - The type of database. Used only when command=create. required: false default: null - aliases: [] choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] size: description: - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. required: false default: null - aliases: [] instance_type: description: - - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. + - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. required: false default: null - aliases: [] username: description: - Master database username. Used only when command=create. required: false default: null - aliases: [] password: description: - Password for the master database username. Used only when command=create or command=modify. required: false default: null - aliases: [] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true - default: null aliases: [ 'aws_region', 'ec2_region' ] db_name: description: - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. required: false default: null - aliases: [] engine_version: description: - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. required: false default: null - aliases: [] parameter_group: description: - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. required: false default: null - aliases: [] license_model: description: - - The license model for this DB instance. Used only when command=create or command=restore. + - The license model for this DB instance. Used only when command=create or command=restore. required: false default: null - aliases: [] choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] multi_zone: description: - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - choices: [ "yes", "no" ] + choices: [ "yes", "no" ] required: false default: null - aliases: [] iops: description: - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000. required: false default: null - aliases: [] security_groups: description: - Comma separated list of one or more security groups. Used only when command=create or command=modify. required: false default: null - aliases: [] vpc_security_groups: description: - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. 
required: false default: null - aliases: [] port: description: - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. required: false default: null - aliases: [] upgrade: description: - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. required: false default: no choices: [ "yes", "no" ] - aliases: [] option_group: description: - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. required: false default: null - aliases: [] maint_window: description: - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." required: false default: null - aliases: [] backup_window: description: - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. required: false default: null - aliases: [] backup_retention: description: - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." required: false default: null - aliases: [] zone: description: - availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore. @@ -176,18 +152,15 @@ options: - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. required: false default: null - aliases: [] snapshot: description: - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null - aliases: [] aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false - default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: @@ -201,50 +174,53 @@ options: required: false default: "no" choices: [ "yes", "no" ] - aliases: [] wait_timeout: description: - how long before wait gives up, in seconds default: 300 - aliases: [] apply_immediately: description: - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. default: no choices: [ "yes", "no" ] - aliases: [] + force_failover: + description: + - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. + required: false + default: "no" + choices: [ "yes", "no" ] + version_added: "2.0" new_instance_name: description: - Name to rename an instance to. Used only when command=modify. required: false default: null - aliases: [] - version_added: 1.5 + version_added: "1.5" character_set_name: description: - Associate the DB instance with a specified character set. Used with command=create. required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" publicly_accessible: description: - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. 
Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" tags: description: - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" requirements: - "python >= 2.6" - "boto" -author: Bruce Pennypacker, Will Thames +author: + - "Bruce Pennypacker (@bpennypacker)" + - "Will Thames (@willthames)" + ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD @@ -289,6 +265,13 @@ EXAMPLES = ''' instance_name: new-database new_instance_name: renamed-database wait: yes + +# Reboot an instance and wait for it to become available again +- rds + command: reboot + instance_name: database + wait: yes + ''' import sys @@ -377,6 +360,13 @@ class RDSConnection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_dbinstance(instance_name) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) @@ -461,6 +451,13 @@ class RDS2Connection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] @@ -624,6 +621,8 @@ def await_resource(conn, resource, status, module): if resource.name is None: module.fail_json(msg="Problem with instance %s" % resource.instance) resource = conn.get_db_instance(resource.name) + if resource is None: + break return resource @@ -710,7 +709,10 @@ def delete_db_instance_or_snapshot(module, conn): if instance_name: if snapshot: params["skip_final_snapshot"] = False - params["final_snapshot_id"] = snapshot + if has_rds2: + params["final_db_snapshot_identifier"] = snapshot + else: + params["final_snapshot_id"] = snapshot else: params["skip_final_snapshot"] = True result = conn.delete_db_instance(instance_name, **params) @@ -839,6 +841,31 @@ def snapshot_db_instance(module, conn): module.exit_json(changed=changed, snapshot=resource.get_data()) +def reboot_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = [] + + if has_rds2: + valid_vars.append('force_failover') + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + result = conn.get_db_instance(instance_name) + changed = False + try: + result = conn.reboot_db_instance(instance_name, **params) + changed = True + except RDSException, e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + 
module.exit_json(changed=changed, instance=resource.get_data()) + + def restore_db_instance(module, conn): required_vars = ['instance_name', 'snapshot'] valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', @@ -910,6 +937,7 @@ def validate_parameters(required_vars, valid_vars, module): 'instance_type': 'db_instance_class', 'password': 'master_user_password', 'new_instance_name': 'new_db_instance_identifier', + 'force_failover': 'force_failover', } if has_rds2: optional_params.update(optional_params_rds2) @@ -952,7 +980,7 @@ def validate_parameters(required_vars, valid_vars, module): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), + command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name = dict(required=False), source_instance = dict(required=False), db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), @@ -984,6 +1012,7 @@ def main(): tags = dict(type='dict', required=False), publicly_accessible = dict(required=False), character_set_name = dict(required=False), + force_failover = dict(type='bool', required=False, default=False) ) ) @@ -1002,6 +1031,7 @@ def main(): 'modify': modify_db_instance, 'promote': promote_db_instance, 'snapshot': snapshot_db_instance, + 'reboot': reboot_db_instance, 'restore': restore_db_instance, } diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index cba482c72d6..7b875304810 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -67,7 +67,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Scott Anderson +author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index 9d8cd786218..3b998c34225 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -53,7 +53,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Scott Anderson +author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: aws ''' @@ -138,10 +138,14 @@ def main(): else: if not exists: new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) - + changed = True else: - changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) - + # Sort the subnet groups before we compare them + matching_groups[0].subnet_ids.sort() + group_subnets.sort() + if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ): + changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) + changed = True except BotoServerError, e: module.fail_json(msg = e.error_message) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index d6c758b3974..aca01193555 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -35,6 +35,12 @@ options: required: true default: null aliases: [] + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone to modify + required: false + version_added: 2.0 + default: null record: 
description: - The full DNS record to create or delete @@ -93,7 +99,53 @@ options: required: false default: false version_added: "1.9" -author: Bruce Pennypacker + identifier: + description: + - Weighted and latency-based resource record sets only. An identifier + that differentiates among multiple resource record sets that have the + same combination of DNS name and type. + required: false + default: null + version_added: "2.0" + weight: + description: + - Weighted resource record sets only. Among resource record sets that + have the same combination of DNS name and type, a value that + determines what portion of traffic for the current resource record set + is routed to the associated location. + required: false + default: null + version_added: "2.0" + region: + description: + - Latency-based resource record sets only Among resource record sets + that have the same combination of DNS name and type, a value that + determines which region this should be associated with for the + latency-based routing + required: false + default: null + version_added: "2.0" + health_check: + description: + - Health check to associate with this record + required: false + default: null + version_added: "2.0" + failover: + description: + - Failover resource record sets only. Whether this is the primary or + secondary resource record set. + required: false + default: null + version_added: "2.0" + vpc_id: + description: + - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC." + - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs. + required: false + default: null + version_added: "2.0" +author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' @@ -156,6 +208,40 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hosted_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hosted_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + +# Use a routing policy to distribute traffic: +- route53: + command: "create" + zone: "foo.com" + record: "www.foo.com" + type: "CNAME" + value: "host1.foo.com" + ttl: 30 + # Routing policy + identifier: "host1@www" + weight: 100 + health_check: "d994b780-3150-49fd-9205-356abdd42e75" ''' @@ -166,11 +252,33 @@ try: import boto.ec2 from boto import route53 from boto.route53 import Route53Connection - from boto.route53.record import ResourceRecordSets + from boto.route53.record import Record, ResourceRecordSets HAS_BOTO = True except ImportError: HAS_BOTO = False +def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id): + """Finds a zone by name or zone_id""" + for zone in conn.get_zones(): + # only save this zone id if the private status of the zone matches + # the private_zone_in boolean specified in the params + private_zone = module.boolean(zone.config.get('PrivateZone', False)) + if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id): + if want_vpc_id: + # NOTE: These details aren't available in other boto methods, hence the necessary + # extra API call + zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse'] + # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 + if isinstance(zone_details['VPCs'], dict): + if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: + return zone + else: # Forward compatibility for when boto fixes that bug + if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: + return zone + else: + return zone + return None + def commit(changes, retry_interval): """Commit changes, but retry PriorRequestNotComplete errors.""" @@ -191,6 +299,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), + hosted_zone_id = dict(required=False, default=None), record = dict(required=True), ttl = dict(required=False, type='int', default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -200,6 +309,12 @@ def main(): overwrite = dict(required=False, type='bool'), retry_interval = dict(required=False, default=500), private_zone = dict(required=False, type='bool', default=False), + identifier = dict(required=False), + weight = dict(required=False, type='int'), + region = dict(required=False), + health_check = dict(required=False), + failover = dict(required=False), + vpc_id = dict(required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -209,6 +324,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() + hosted_zone_id_in = module.params.get('hosted_zone_id') ttl_in = module.params.get('ttl') record_in = module.params.get('record').lower() type_in = module.params.get('type') @@ -217,6 +333,12 @@ def main(): alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') retry_interval_in = module.params.get('retry_interval') private_zone_in = module.params.get('private_zone') + identifier_in = module.params.get('identifier') + weight_in = module.params.get('weight') + region_in = module.params.get('region') + health_check_in = module.params.get('health_check') + 
failover_in = module.params.get('failover') + vpc_id_in = module.params.get('vpc_id') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -224,7 +346,7 @@ def main(): if type(value_in) is str: if value_in: - value_list = sorted(value_in.split(',')) + value_list = sorted([s.strip() for s in value_in.split(',')]) elif type(value_in) is list: value_list = sorted(value_in) @@ -243,38 +365,45 @@ def main(): elif not alias_hosted_zone_id_in: module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") + if vpc_id_in and not private_zone_in: + module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter" + " 'vpc_id'") + + # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) - # Get all the existing hosted zones and save their ID's - zones = {} - results = conn.get_all_hosted_zones() - for r53zone in results['ListHostedZonesResponse']['HostedZones']: - # only save this zone id if the private status of the zone matches - # the private_zone_in boolean specified in the params - if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # Find the named zone ID + zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in) # Verify that the requested zone is already defined in Route53 - if not zone_in in zones: + if zone is None: errmsg = "Zone %s does not exist in Route53" % zone_in module.fail_json(msg = errmsg) record = {} found_record = False - sets = conn.get_all_rrsets(zones[zone_in]) + wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in, + identifier=identifier_in, weight=weight_in, region=region_in, + health_check=health_check_in, failover=failover_in) + for v in value_list: + if alias_in: + wanted_rset.set_alias(alias_hosted_zone_id_in, v) + else: + wanted_rset.add_value(v) + + sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in) for rset in sets: # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round # tripping of things like * and @. 
decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name.lower() == record_in.lower(): + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: found_record = True record['zone'] = zone_in record['type'] = rset.type @@ -282,6 +411,15 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in + record['identifier'] = rset.identifier + record['weight'] = rset.weight + record['region'] = rset.region + record['failover'] = rset.failover + record['health_check'] = rset.health_check + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name @@ -291,35 +429,32 @@ def main(): record['alias'] = False record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': + if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml(): module.exit_json(changed=False) + break if command_in == 'get': - module.exit_json(changed=False, set=record) + if type_in == 'NS': + ns = record['values'] + else: + # Retrieve name servers associated to the zone. + ns = conn.get_zone(zone_in).get_nameservers() + + module.exit_json(changed=False, set=record, nameservers=ns) if command_in == 'delete' and not found_record: module.exit_json(changed=False) - changes = ResourceRecordSets(conn, zones[zone_in]) - - if command_in == 'create' and found_record: - if not module.params['overwrite']: - module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") - else: - change = changes.add_change("DELETE", record_in, type_in, record['ttl']) - for v in record['values']: - if record['alias']: - change.set_alias(record['alias_hosted_zone_id'], v) - else: - change.add_value(v) + changes = ResourceRecordSets(conn, zone.id) if command_in == 'create' or command_in == 'delete': - change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) - for v in value_list: - if module.params['alias']: - change.set_alias(alias_hosted_zone_id_in, v) - else: - change.add_value(v) + if command_in == 'create' and found_record: + if not module.params['overwrite']: + module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") + command = 'UPSERT' + else: + command = command_in.upper() + changes.add_change_record(command, wanted_rset) try: result = commit(changes, retry_interval_in) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index fddf3698694..e6b511b36b8 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -22,68 +22,119 @@ description: - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto. version_added: "1.1" options: - bucket: + aws_access_key: description: + - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + aws_secret_key: + description: + - AWS secret key. 
If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: ['ec2_secret_key', 'secret_key'] + bucket: + description: - Bucket name. required: true default: null aliases: [] + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + required: false + aliases: [] + version_added: "1.3" + encrypt: + description: + - When set for PUT mode, asks for server-side encryption + required: false + default: no + version_added: "2.0" + expiration: + description: + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. + required: false + default: 600 + aliases: [] + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + required: false + default: null + version_added: "2.0" + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + required: false + default: 1000 + version_added: "2.0" + metadata: + description: + - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. + required: false + default: null + version_added: "1.6" + mode: + description: + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object). + required: true + default: null + aliases: [] object: description: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null - aliases: [] - version_added: "1.3" - src: + prefix: description: - - The source file path when performing a PUT operation. + - Limits the response to keys that begin with the specified prefix for list mode + required: false + default: null + version_added: "2.0" + version: + description: + - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. required: false default: null aliases: [] - version_added: "1.3" - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - aliases: [] - version_added: "1.3" + version_added: "2.0" overwrite: description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Starting with (v2.0) the valid values for this parameter are (always, never, different) and boolean is still accepted for backward compatibility, If the value set to (different) the file would be uploaded/downloaded only if the checksums are different. + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. required: false - default: always + default: true version_added: "1.2" - mode: - description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] - expiration: - description: - - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. 
- required: false - default: 600 - aliases: [] - s3_url: - description: - - "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS" - default: null - aliases: [ S3_URL ] - metadata: - description: - - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. - required: false - default: null - version_added: "1.6" region: description: - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect." required: false default: null version_added: "1.8" -author: Lester Wade, Ralph Tice + retries: + description: + - On recoverable failure, how many times to retry before actually failing. + required: false + default: 0 + version_added: "2.0" + s3_url: + description: + - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS + default: null + aliases: [ S3_URL ] + src: + description: + - The source file path when performing a PUT operation. + required: false + default: null + aliases: [] + version_added: "1.3" + +requirements: [ "boto" ] +author: + - "Lester Wade (@lwade)" + - "Ralph Tice (@ralph-tice)" extends_documentation_fragment: aws ''' @@ -94,9 +145,18 @@ EXAMPLES = ''' # Simple GET operation - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get +# Get a specific version of an object. +- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get + # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' +# List keys simple +- s3: bucket=mybucket mode=list + +# List keys all options +- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 + # Create an empty bucket - s3: bucket=mybucket mode=create @@ -106,16 +166,20 @@ EXAMPLES = ''' # Delete a bucket and all contents - s3: bucket=mybucket mode=delete -# GET an object but dont download if the file checksums match +# GET an object but dont download if the file checksums match - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different + +# Delete an object from a bucket +- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj ''' import os import urlparse -import hashlib +from ssl import SSLError try: import boto + import boto.ec2 from boto.s3.connection import Location from boto.s3.connection import OrdinaryCallingFormat from boto.s3.connection import S3Connection @@ -123,20 +187,23 @@ try: except ImportError: HAS_BOTO = False -def key_check(module, s3, bucket, obj): +def key_check(module, s3, bucket, obj, version=None): try: bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) + key_check = bucket.get_key(obj, version_id=version) except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) + if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned. 
+ key_check = None + else: + module.fail_json(msg=str(e)) if key_check: return True else: return False -def keysum(module, s3, bucket, obj): +def keysum(module, s3, bucket, obj, version=None): bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) + key_check = bucket.get_key(obj, version_id=version) if not key_check: return None md5_remote = key_check.etag[1:-1] @@ -165,6 +232,19 @@ def create_bucket(module, s3, bucket, location=None): if bucket: return True +def get_bucket(module, s3, bucket): + try: + return s3.lookup(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def list_keys(module, bucket_object, prefix, marker, max_keys): + all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) + + keys = [x.key for x in all_keys] + + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def delete_bucket(module, s3, bucket): try: bucket = s3.lookup(bucket) @@ -207,7 +287,8 @@ def path_check(path): else: return False -def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): + +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt): try: bucket = s3.lookup(bucket) key = bucket.new_key(obj) @@ -215,25 +296,34 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): for meta_key in metadata.keys(): key.set_metadata(meta_key, metadata[meta_key]) - key.set_contents_from_filename(src) + key.set_contents_from_filename(src, encrypt_key=encrypt) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: module.fail_json(msg= str(e)) -def download_s3file(module, s3, bucket, obj, dest): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) +def download_s3file(module, s3, bucket, obj, dest, retries, version=None): + # retries is the number of loops; range/xrange needs to be one + # more to get that count of loops. + bucket = s3.lookup(bucket) + key = bucket.get_key(obj, version_id=version) + for x in range(0, retries + 1): + try: + key.get_contents_to_filename(dest) + module.exit_json(msg="GET operation complete", changed=True) + except s3.provider.storage_copy_error, e: + module.fail_json(msg= str(e)) + except SSLError as e: + # actually fail on last pass through the loop. + if x >= retries: + module.fail_json(msg="s3 download failed; %s" % e) + # otherwise, try again, this may be a transient timeout. 
+            pass

-def download_s3str(module, s3, bucket, obj):
+def download_s3str(module, s3, bucket, obj, version=None):
     try:
         bucket = s3.lookup(bucket)
-        key = bucket.lookup(obj)
+        key = bucket.get_key(obj, version_id=version)
         contents = key.get_contents_as_string()
         module.exit_json(msg="GET operation complete", contents=contents, changed=True)
     except s3.provider.storage_copy_error, e:
@@ -265,18 +355,25 @@ def is_walrus(s3_url):
     else:
         return False

+
 def main():
     argument_spec = ec2_argument_spec()
     argument_spec.update(dict(
             bucket         = dict(required=True),
-            object         = dict(),
-            src            = dict(),
             dest           = dict(default=None),
-            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
+            encrypt        = dict(default=True, type='bool'),
             expiry         = dict(default=600, aliases=['expiration']),
-            s3_url         = dict(aliases=['S3_URL']),
-            overwrite      = dict(aliases=['force'], default='always'),
+            marker         = dict(default=None),
+            max_keys       = dict(default=1000),
             metadata       = dict(type='dict'),
+            mode           = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+            object         = dict(),
+            version        = dict(default=None),
+            overwrite      = dict(aliases=['force'], default='always'),
+            prefix         = dict(default=None),
+            retries        = dict(aliases=['retry'], type='int', default=0),
+            s3_url         = dict(aliases=['S3_URL']),
+            src            = dict(),
         ),
     )
     module = AnsibleModule(argument_spec=argument_spec)
@@ -285,15 +382,27 @@ def main():
         module.fail_json(msg='boto required for this module')

     bucket = module.params.get('bucket')
-    obj = module.params.get('object')
-    src = module.params.get('src')
+    encrypt = module.params.get('encrypt')
+    expiry = int(module.params['expiry'])
     if module.params.get('dest'):
         dest = os.path.expanduser(module.params.get('dest'))
-    mode = module.params.get('mode')
-    expiry = int(module.params['expiry'])
-    s3_url = module.params.get('s3_url')
-    overwrite = module.params.get('overwrite')
+    marker = module.params.get('marker')
+    max_keys = module.params.get('max_keys')
     metadata = module.params.get('metadata')
+    mode = module.params.get('mode')
+    obj = module.params.get('object')
+    version = module.params.get('version')
+    overwrite = module.params.get('overwrite')
+    prefix = module.params.get('prefix')
+    retries = module.params.get('retries')
+    s3_url = module.params.get('s3_url')
+    src = module.params.get('src')

     if overwrite not in ['always', 'never', 'different']:
         if module.boolean(overwrite):
@@ -356,29 +465,33 @@ def main():
             module.fail_json(msg="Target bucket cannot be found", failed=True)

         # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
-        keyrtn = key_check(module, s3, bucket, obj)
+        keyrtn = key_check(module, s3, bucket, obj, version=version)
         if keyrtn is False:
-            module.fail_json(msg="Target key cannot be found", failed=True)
+            if version is not None:
+                module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
+            else:
+                module.fail_json(msg="Key %s does not exist."%obj, failed=True)

         # If the destination path doesn't exist, no need to md5um etag check, so just download.
         pathrtn = path_check(dest)
         if pathrtn is False:
-            download_s3file(module, s3, bucket, obj, dest)
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

         # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
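+        # Note: keysum() returns the S3 ETag (quotes stripped), which equals
+        # the object's MD5 only for single-part, unencrypted uploads; for
+        # multipart uploads this comparison can report a mismatch even when
+        # the content is identical.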
         if pathrtn is True:
-            md5_remote = keysum(module, s3, bucket, obj)
-            md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
+            md5_remote = keysum(module, s3, bucket, obj, version=version)
+            md5_local = module.md5(dest)
             if md5_local == md5_remote:
                 sum_matches = True
                 if overwrite == 'always':
-                    download_s3file(module, s3, bucket, obj, dest)
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                 else:
                     module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
             else:
                 sum_matches = False
+
                 if overwrite in ('always', 'different'):
-                    download_s3file(module, s3, bucket, obj, dest)
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                 else:
                     module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")

@@ -388,9 +501,7 @@ def main():

         # At this point explicitly define the overwrite condition.
         if sum_matches is True and pathrtn is True and overwrite == 'always':
-            download_s3file(module, s3, bucket, obj, dest)
-
-        # If sum does not match but the destination exists, we
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

     # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
     if mode == 'put':
@@ -412,30 +523,47 @@ def main():
         # Lets check key state. Does it exist and if it does, compute the etag md5sum.
         if bucketrtn is True and keyrtn is True:
             md5_remote = keysum(module, s3, bucket, obj)
-            md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
+            md5_local = module.md5(src)
+
             if md5_local == md5_remote:
                 sum_matches = True
                 if overwrite == 'always':
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                 else:
                     get_download_url(module, s3, bucket, obj, expiry, changed=False)
             else:
                 sum_matches = False
                 if overwrite in ('always', 'different'):
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                 else:
                     module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

         # If neither exist (based on bucket existence), we can create both.
         if bucketrtn is False and pathrtn is True:
             create_bucket(module, s3, bucket, location)
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)

         # If bucket exists but key doesn't, just upload.
         if bucketrtn is True and pathrtn is True and keyrtn is False:
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)

-    # Support for deleting an object if we have both params.
+    # Delete an object from a bucket, not the entire bucket
+    if mode == 'delobj':
+        if obj is None:
+            module.fail_json(msg="object parameter is required", failed=True)
+        if bucket:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is True:
+                deletertn = delete_key(module, s3, bucket, obj)
+                if deletertn is True:
+                    module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
+            else:
+                module.fail_json(msg="Bucket does not exist.", changed=False)
+        else:
+            module.fail_json(msg="Bucket parameter is required.", failed=True)
+
+
+    # Delete an entire bucket, including all objects in the bucket
     if mode == 'delete':
         if bucket:
             bucketrtn = bucket_check(module, s3, bucket)
@@ -448,6 +576,16 @@ def main():
         else:
             module.fail_json(msg="Bucket parameter is required.", failed=True)

+    # Support for listing a set of keys
+    if mode == 'list':
+        bucket_object = get_bucket(module, s3, bucket)
+
+        # If the bucket does not exist then bail out
+        if bucket_object is None:
+            module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
+
+        list_keys(module, bucket_object, prefix, marker, max_keys)
+
     # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
     # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
     if mode == 'create':
@@ -494,11 +632,14 @@ def main():
             if bucketrtn is False:
                 module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
             else:
-                keyrtn = key_check(module, s3, bucket, obj)
+                keyrtn = key_check(module, s3, bucket, obj, version=version)
                 if keyrtn is True:
-                    download_s3str(module, s3, bucket, obj)
+                    download_s3str(module, s3, bucket, obj, version=version)
                 else:
-                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
+                    if version is not None:
+                        module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
+                    else:
+                        module.fail_json(msg="Key %s does not exist."%obj, failed=True)

     module.exit_json(failed=False)

diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py
index 3303fc01867..c4fa41a6eb1 100644
--- a/cloud/azure/azure.py
+++ b/cloud/azure/azure.py
@@ -53,7 +53,7 @@ options:
     default: null
   role_size:
     description:
-      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6)
+      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances in the G and DS series are not available in all regions (locations), so make sure the size and type of instance you select is available in your chosen location.
     required: false
     default: Small
   endpoints:
@@ -110,11 +110,39 @@ options:
     required: false
     default: 'present'
     aliases: []
+  reset_pass_atlogon:
+    description:
+      - Reset the admin password on first logon for Windows hosts
+    required: false
+    default: "no"
+    version_added: "2.0"
+    choices: [ "yes", "no" ]
+  auto_updates:
+    description:
+      - Enable Auto Updates on Windows machines
+    required: false
+    version_added: "2.0"
+    default: "no"
+    choices: [ "yes", "no" ]
+  enable_winrm:
+    description:
+      - Enable WinRM on Windows machines
+    required: false
+    version_added: "2.0"
+    default: "yes"
+    choices: [ "yes", "no" ]
+  os_type:
+    description:
+      - The type of OS that is getting provisioned
+    required: false
+    version_added: "2.0"
+    default: "linux"
+    choices: [ "windows", "linux" ]

 requirements:
     - "python >= 2.6"
     - "azure >= 0.7.1"
-author: John Whitbeck
+author: "John Whitbeck (@jwhitbeck)"
 '''

 EXAMPLES = '''
@@ -138,6 +166,29 @@ EXAMPLES = '''
     module: azure
     name: my-virtual-machine
     state: absent
+
+# Create a Windows machine
+- hosts: all
+  connection: local
+  tasks:
+    - local_action:
+        module: azure
+        name: "ben-Windows-23"
+        hostname: "win123"
+        os_type: windows
+        enable_winrm: yes
+        subscription_id: "{{ azure_sub_id }}"
+        management_cert_path: "{{ azure_cert_path }}"
+        role_size: Small
+        image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5'
+        location: 'East Asia'
+        password: "xxx"
+        storage_account: benooytes
+        user: admin
+        wait: yes
+        virtual_network_name: "{{ vnet_name }}"
+
+
 '''

 import base64
@@ -184,6 +235,14 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
                     'Standard_D12',
                     'Standard_D13',
                     'Standard_D14',
+                    'Standard_DS1',
+                    'Standard_DS2',
+                    'Standard_DS3',
+                    'Standard_DS4',
+                    'Standard_DS11',
+                    'Standard_DS12',
+                    'Standard_DS13',
+                    'Standard_DS14',
                     'Standard_G1',
                     'Standard_G2',
                     'Standard_G3',
@@ -196,7 +255,7 @@ try:
     from azure import WindowsAzureError, WindowsAzureMissingResourceError
     from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
                                          PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
-                                         ConfigurationSetInputEndpoint)
+                                         ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
     HAS_AZURE = True
 except ImportError:
     HAS_AZURE = False
@@ -264,6 +323,7 @@ def create_virtual_machine(module, azure):
         True if a new virtual machine and/or cloud service was created, false otherwise
     """
     name = module.params.get('name')
+    os_type = module.params.get('os_type')
     hostname = module.params.get('hostname') or name + ".cloudapp.net"
     endpoints = module.params.get('endpoints').split(',')
     ssh_cert_path = module.params.get('ssh_cert_path')
@@ -295,10 +355,21 @@ def create_virtual_machine(module, azure):
         azure.get_role(name, name, name)
     except WindowsAzureMissingResourceError:
         # vm does not exist; create it
-
-        # Create linux configuration
-        disable_ssh_password_authentication = not password
-        linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
+
+        if os_type == 'linux':
+            # Create Linux configuration
+            disable_ssh_password_authentication = not password
+            vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
+        else:
+            # Create Windows config
+            vm_config = WindowsConfigurationSet(hostname, password, module.params.get('reset_pass_atlogon'),\
+                                                module.params.get('auto_updates'), None, user)
+            vm_config.domain_join = None
+            if module.params.get('enable_winrm'):
+                listener = Listener('Http')
+                vm_config.win_rm.listeners.listeners.append(listener)
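+                # Listener('Http') enables an unencrypted WinRM listener on
+                # the default port; an 'Https' listener would additionally
+                # need a certificate.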
+ else: + vm_config.win_rm = None # Add ssh certificates if specified if ssh_cert_path: @@ -313,7 +384,7 @@ def create_virtual_machine(module, azure): authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint)) # Append ssh config to linux machine config - linux_config.ssh = ssh_config + vm_config.ssh = ssh_config # Create network configuration network_config = ConfigurationSetInputEndpoints() @@ -340,7 +411,7 @@ def create_virtual_machine(module, azure): deployment_slot='production', label=name, role_name=name, - system_config=linux_config, + system_config=vm_config, network_config=network_config, os_virtual_hard_disk=os_hd, role_size=role_size, @@ -448,6 +519,7 @@ def main(): ssh_cert_path=dict(), name=dict(), hostname=dict(), + os_type=dict(default='linux', choices=['linux', 'windows']), location=dict(choices=AZURE_LOCATIONS), role_size=dict(choices=AZURE_ROLE_SIZES), subscription_id=dict(no_log=True), @@ -461,7 +533,10 @@ def main(): state=dict(default='present'), wait=dict(type='bool', default=False), wait_timeout=dict(default=600), - wait_timeout_redirects=dict(default=300) + wait_timeout_redirects=dict(default=300), + reset_pass_atlogon=dict(type='bool', default=False), + auto_updates=dict(type='bool', default=False), + enable_winrm=dict(type='bool', default=True), ) ) if not HAS_AZURE: @@ -479,7 +554,7 @@ def main(): cloud_service_raw = None if module.params.get('state') == 'absent': (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure) - + elif module.params.get('state') == 'present': # Changed is always set to true when provisioning new instances if not module.params.get('name'): @@ -492,8 +567,8 @@ def main(): module.fail_json(msg='location parameter is required for new instance') if not module.params.get('storage_account'): module.fail_json(msg='storage_account parameter is required for new instance') - if not module.params.get('password'): - module.fail_json(msg='password parameter is required for new instance') + if not (module.params.get('password') or module.params.get('ssh_cert_path')): + module.fail_json(msg='password or ssh_cert_path parameter is required for new instance') (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index f4475a104a7..d7b55bee693 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -22,6 +22,7 @@ short_description: Create/delete a droplet/SSH_key in DigitalOcean description: - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. version_added: "1.3" +author: "Vincent Viallet (@zbal)" options: command: description: diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index bf6bf8679b0..905b6dae2d0 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -22,6 +22,7 @@ short_description: Create/delete a DNS record in DigitalOcean description: - Create/delete a DNS record in DigitalOcean. 
 version_added: "1.6"
+author: "Michael Gregson (@mgregson)"
 options:
   state:
     description:
diff --git a/cloud/digital_ocean/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py
index 1304c756422..a509276bc48 100644
--- a/cloud/digital_ocean/digital_ocean_sshkey.py
+++ b/cloud/digital_ocean/digital_ocean_sshkey.py
@@ -22,6 +22,7 @@ short_description: Create/delete an SSH key in DigitalOcean
 description:
      - Create/delete an SSH key.
 version_added: "1.6"
+author: "Michael Gregson (@mgregson)"
 options:
   state:
     description:
diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py
index cb6d3dae075..035766fac77 100644
--- a/cloud/docker/docker.py
+++ b/cloud/docker/docker.py
@@ -59,10 +59,10 @@ options:
     version_added: "1.5"
   ports:
     description:
-      - List containing private to public port mapping specification. Use docker
-      - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
-      - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is
-      - a host interface.
+      - "List containing private to public port mapping specification.
+        Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
+        where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host interface.
+        The container ports need to be exposed either in the Dockerfile or via the C(expose) option."
     default: null
     version_added: "1.5"
   expose:
@@ -92,6 +92,23 @@ options:
      - 'alias. Use docker CLI-style syntax: C(redis:myredis).'
     default: null
     version_added: "1.5"
+  log_driver:
+    description:
+      - You can specify a different logging driver for the container than for the daemon.
+        "json-file" Default logging driver for Docker. Writes JSON messages to file.
+        docker logs command is available only for this logging driver.
+        "none" disables any logging for the container. docker logs won't be available with this driver.
+        "syslog" Syslog logging driver for Docker. Writes log messages to syslog.
+        docker logs command is not available for this logging driver.
+        If not defined explicitly, the Docker daemon's default ("json-file") will apply.
+        Requires docker >= 1.6.0.
+    required: false
+    default: json-file
+    choices:
+      - json-file
+      - none
+      - syslog
+    version_added: "2.0"
   memory_limit:
     description:
       - RAM allocated to the container as a number of bytes or as a human-readable
@@ -143,6 +160,12 @@ options:
       specified by docker-py.
     default: docker-py default remote API version
     version_added: "1.8"
+  docker_user:
+    description:
+      - Username or UID to use within the container
+    required: false
+    default: null
+    version_added: "2.0"
   username:
     description:
       - Remote API username.
@@ -174,8 +197,16 @@ options:
     default: null
   detach:
     description:
-      - Enable detached mode to leave the container running in background.
+      - Enable detached mode to leave the container running in background. If
+        disabled, fail unless the process exits cleanly.
     default: true
+  signal:
+    version_added: "2.0"
+    description:
+      - With the state "killed", you can alter the signal sent to the
+        container.
+    required: false
+    default: KILL
   state:
     description:
       - Assert the container's desired state. "present" only asserts that the
@@ -234,6 +265,12 @@ options:
     default: DockerHub
     aliases: []
     version_added: "1.8"
+  read_only:
+    description:
+      - Mount the container's root filesystem as read only
+    default: false
+    aliases: []
+    version_added: "2.0"
   restart_policy:
     description:
       - Container restart policy.
@@ -255,8 +292,30 @@
       docker-py >= 0.5.0.
default: false version_added: "1.9" - -author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson + cpu_set: + description: + - CPUs in which to allow execution. Requires docker-py >= 0.6.0. + required: false + default: null + version_added: "2.0" + cap_add: + description: + - Add capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + version_added: "2.0" + cap_drop: + description: + - Drop capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + aliases: [] + version_added: "2.0" +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Ash Wilson (@smashwilson)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" @@ -367,6 +426,7 @@ from urlparse import urlparse try: import docker.client import docker.utils + import docker.errors from requests.exceptions import RequestException except ImportError: HAS_DOCKER_PY = False @@ -506,6 +566,12 @@ class DockerManager(object): 'restart_policy': ((0, 5, 0), '1.14'), 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), + 'log_driver': ((1, 2, 0), '1.18'), + 'host_config': ((0, 7, 0), '1.15'), + 'cpu_set': ((0, 6, 0), '1.14'), + 'cap_add': ((0, 5, 0), '1.14'), + 'cap_drop': ((0, 5, 0), '1.14'), + 'read_only': ((1, 0, 0), '1.17'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -517,24 +583,26 @@ class DockerManager(object): self.volumes = None if self.module.params.get('volumes'): self.binds = {} - self.volumes = {} + self.volumes = [] vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") + # regular volume + if len(parts) == 1: + self.volumes.append(parts[0]) # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. 
/www, mounts a docker volume /www on the container at the same location) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } else: - self.volumes[parts[0]] = {} + self.module.fail_json(msg='volumes support 1 to 3 arguments') self.lxc_conf = None if self.module.params.get('lxc_conf'): @@ -713,6 +781,53 @@ class DockerManager(object): else: return None + def get_start_params(self): + """ + Create start params + """ + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + 'read_only': self.module.params.get('read_only'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + return params + + def get_host_config(self): + """ + Create HostConfig object + """ + params = self.get_start_params() + return docker.utils.create_host_config(**params) + def get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. 
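
The reworked volume handling above accepts one to three colon-separated fields per entry. As a sanity check, here is a standalone sketch of the resulting data structures, stripped of the module plumbing; the sample paths are made up for illustration:

# Standalone sketch of the volume-string parsing above. One field gives a
# container-only volume; two or three fields give a host bind mount, with an
# optional trailing "ro"/"rw" bind mode.
def parse_volumes(vols):
    volumes, binds = [], {}
    for vol in vols:
        parts = vol.split(':')
        if len(parts) == 1:                         # e.g. "/data"
            volumes.append(parts[0])
        elif 2 <= len(parts) <= 3:                  # e.g. "/mnt:/tmp" or "/mnt:/tmp:ro"
            ro = False                              # default to read-write
            if len(parts) == 3:
                if parts[2] not in ('ro', 'rw'):
                    raise ValueError('bind mode needs to either be "ro" or "rw"')
                ro = parts[2] == 'ro'
            binds[parts[0]] = {'bind': parts[1], 'ro': ro}
        else:
            raise ValueError('volumes support 1 to 3 arguments')
    return volumes, binds

print(parse_volumes(['/data', '/mnt:/tmp', '/srv/www:/var/www:ro']))
# -> (['/data'], {'/mnt': {'bind': '/tmp', 'ro': False},
#                 '/srv/www': {'bind': '/var/www', 'ro': True}})
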
@@ -1041,15 +1156,14 @@ class DockerManager(object): for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): container_port = "{0}/tcp".format(container_port) - bind = {} if len(config) == 1: - bind['HostIp'] = "0.0.0.0" - bind['HostPort'] = "" + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) else: - bind['HostIp'] = config[0] - bind['HostPort'] = str(config[1]) - - expected_bound_ports[container_port] = [bind] + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] actual_bound_ports = container['HostConfig']['PortBindings'] or {} @@ -1086,8 +1200,8 @@ class DockerManager(object): # NETWORK MODE - expected_netmode = self.module.params.get('net') or '' - actual_netmode = container['HostConfig']['NetworkMode'] + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' if actual_netmode != expected_netmode: self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) @@ -1110,6 +1224,16 @@ class DockerManager(object): self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) differing.append(container) + # LOG_DRIVER + + if self.ensure_capability('log_driver', False) : + expected_log_driver = self.module.params.get('log_driver') or 'json-file' + actual_log_driver = container['HostConfig']['LogConfig']['Type'] + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue + return differing def get_deployed_containers(self): @@ -1206,44 +1330,7 @@ class DockerManager(object): except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) - def create_containers(self, count=1): - try: - mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - - params = {'image': self.module.params.get('image'), - 'command': self.module.params.get('command'), - 'ports': self.exposed_ports, - 'volumes': self.volumes, - 'mem_limit': mem_limit, - 'environment': self.env, - 'hostname': self.module.params.get('hostname'), - 'domainname': self.module.params.get('domainname'), - 'detach': self.module.params.get('detach'), - 'name': self.module.params.get('name'), - 'stdin_open': self.module.params.get('stdin_open'), - 'tty': self.module.params.get('tty'), - } - - def do_create(count, params): - results = [] - for _ in range(count): - result = self.client.create_container(**params) - self.increment_counter('created') - results.append(result) - - return results - - try: - containers = do_create(count, params) - except: - self.pull_image() - containers = do_create(count, params) - - return containers - - def start_containers(self, containers): + def create_host_config(self): params = { 'lxc_conf': self.lxc_conf, 'binds': self.binds, @@ -1256,7 +1343,8 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 
'cap_add', 'cap_drop'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -1273,6 +1361,10 @@ class DockerManager(object): if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] @@ -1281,10 +1373,79 @@ class DockerManager(object): self.ensure_capability('extra_hosts') params['extra_hosts'] = optionals['extra_hosts'] + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + + return docker.utils.create_host_config(**params) + + def create_containers(self, count=1): + params = {'image': self.module.params.get('image'), + 'command': self.module.params.get('command'), + 'ports': self.exposed_ports, + 'volumes': self.volumes, + 'environment': self.env, + 'hostname': self.module.params.get('hostname'), + 'domainname': self.module.params.get('domainname'), + 'detach': self.module.params.get('detach'), + 'name': self.module.params.get('name'), + 'stdin_open': self.module.params.get('stdin_open'), + 'tty': self.module.params.get('tty'), + 'cpuset': self.module.params.get('cpu_set'), + 'host_config': self.create_host_config(), + 'user': self.module.params.get('docker_user'), + } + + if self.ensure_capability('host_config', fail=False): + params['host_config'] = self.get_host_config() + + def do_create(count, params): + results = [] + for _ in range(count): + result = self.client.create_container(**params) + self.increment_counter('created') + results.append(result) + + return results + + try: + containers = do_create(count, params) + except docker.errors.APIError as e: + if e.response.status_code != 404: + raise + + self.pull_image() + containers = do_create(count, params) + + return containers + + def start_containers(self, containers): + params = {} + + if not self.ensure_capability('host_config', fail=False): + params = self.get_start_params() + for i in containers: - self.client.start(i['Id'], **params) + self.client.start(i) self.increment_counter('started') + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + def stop_containers(self, containers): for i in containers: self.client.stop(i['Id']) @@ -1299,7 +1460,7 @@ class DockerManager(object): def kill_containers(self, containers): for i in containers: - self.client.kill(i['Id']) + self.client.kill(i['Id'], self.module.params.get('signal')) self.increment_counter('killed') def restart_containers(self, containers): @@ -1453,6 +1614,7 @@ def main(): tls_ca_cert = dict(required=False, default=None, type='str'), tls_hostname = dict(required=False, type='str', default=None), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), + docker_user = 
dict(default=None),
             username        = dict(default=None),
             password        = dict(),
             email           = dict(),
@@ -1463,6 +1625,7 @@ def main():
             dns             = dict(),
             detach          = dict(default=True, type='bool'),
             state           = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
+            signal          = dict(default=None),
             restart_policy  = dict(default=None, choices=['always', 'on-failure', 'no']),
             restart_policy_retry = dict(default=0, type='int'),
             extra_hosts     = dict(type='dict'),
@@ -1475,6 +1638,11 @@ def main():
             net             = dict(default=None),
             pid             = dict(default=None),
             insecure_registry = dict(default=False, type='bool'),
+            log_driver      = dict(default=None, choices=['json-file', 'none', 'syslog']),
+            cpu_set         = dict(default=None),
+            cap_add         = dict(default=None, type='list'),
+            cap_drop        = dict(default=None, type='list'),
+            read_only       = dict(default=False, type='bool'),
         ),
         required_together = (
             ['tls_client_cert', 'tls_client_key'],
@@ -1500,10 +1668,14 @@ def main():
     if count > 1 and name:
         module.fail_json(msg="Count and name must not be used together")

-    # Explicitly pull new container images, if requested.
-    # Do this before noticing running and deployed containers so that the image names will differ
-    # if a newer image has been pulled.
-    if pull == "always":
+    # Explicitly pull new container images, if requested. Do this before
+    # noticing running and deployed containers so that the image names
+    # will differ if a newer image has been pulled.
+    # Missing images should be pulled first to avoid downtime when the old
+    # container is stopped but the image for the new one is not downloaded
+    # yet. It also prevents removal of a running container before realizing
+    # that the requested image cannot be retrieved.
+    if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
         manager.pull_image()

     containers = ContainerSet(manager)
@@ -1532,7 +1704,7 @@ def main():
                           summary=manager.counters,
                           containers=containers.changed,
                           reload_reasons=manager.get_reload_reason_message(),
-                          ansible_facts=_ansible_facts(containers.changed))
+                          ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))

     except DockerAPIError as e:
         module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)

diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py
index faf47cd6e09..09fc61e6b08 100644
--- a/cloud/docker/docker_image.py
+++ b/cloud/docker/docker_image.py
@@ -23,7 +23,7 @@ DOCUMENTATION = '''
 ---
 module: docker_image
-author: Pavel Antonov
+author: "Pavel Antonov (@softzilla)"
 version_added: "1.5"
 short_description: manage docker images
 description:
@@ -65,6 +65,12 @@ options:
     required: false
     default: unix://var/run/docker.sock
     aliases: []
+  docker_api_version:
+    description:
+      - Remote API version to use. This defaults to the current default as
+        specified by docker-py.
+ default: docker-py default remote API version + version_added: "2.0" state: description: - Set the state of the image @@ -137,6 +143,14 @@ if HAS_DOCKER_CLIENT: except ImportError: from docker.client import APIError as DockerAPIError + try: + # docker-py 1.2+ + import docker.constants + DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION + except (ImportError, AttributeError): + # docker-py less than 1.2 + DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION + class DockerImageManager: def __init__(self, module): @@ -147,7 +161,10 @@ class DockerImageManager: self.tag = self.module.params.get('tag') self.nocache = self.module.params.get('nocache') docker_url = urlparse(module.params.get('docker_url')) - self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) + self.client = docker.Client( + base_url=docker_url.geturl(), + version=module.params.get('docker_api_version'), + timeout=module.params.get('timeout')) self.changed = False self.log = [] self.error_msg = None @@ -220,14 +237,17 @@ class DockerImageManager: def main(): module = AnsibleModule( argument_spec = dict( - path = dict(required=False, default=None), - dockerfile = dict(required=False, default="Dockerfile"), - name = dict(required=True), - tag = dict(required=False, default="latest"), - nocache = dict(default=False, type='bool'), - state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), - timeout = dict(default=600, type='int'), + path = dict(required=False, default=None), + dockerfile = dict(required=False, default="Dockerfile"), + name = dict(required=True), + tag = dict(required=False, default="latest"), + nocache = dict(default=False, type='bool'), + state = dict(default='present', choices=['absent', 'present', 'build']), + docker_url = dict(default='unix://var/run/docker.sock'), + docker_api_version = dict(required=False, + default=DEFAULT_DOCKER_API_VERSION, + type='str'), + timeout = dict(default=600, type='int'), ) ) if not HAS_DOCKER_CLIENT: diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 5e0c5e982e8..c1e6f5707a6 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -84,7 +84,7 @@ requirements: - "python >= 2.6" - "boto >= 2.9" -author: benno@ansible.com Note. Most of the code has been taken from the S3 module. +author: "Benno Joy (@bennojoy)" ''' @@ -284,7 +284,7 @@ def get_download_url(module, gs, bucket, obj, expiry): def handle_get(module, gs, bucket, obj, overwrite, dest): md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() + md5_local = module.md5(dest) if md5_local == md5_remote: module.exit_json(changed=False) if md5_local != md5_remote and not overwrite: @@ -300,7 +300,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration): # Lets check key state. Does it exist and if it does, compute the etag md5sum. 
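+    # module.md5() (used below) hashes the file in chunks through Ansible's
+    # module utils rather than reading the whole file into memory, which is
+    # why it replaces hashlib.md5(open(...).read()) in this module.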
if bucket_rc and key_rc: md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() + md5_local = module.md5(src) if md5_local == md5_remote: module.exit_json(msg="Local and remote object are identical", changed=False) if md5_local != md5_remote and not overwrite: diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..60287ad8b6e 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -58,6 +58,14 @@ options: required: false default: null aliases: [] + service_account_permissions: + version_added: 2.0 + description: + - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) + required: false + default: null + aliases: [] + choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"] pem_file: version_added: 1.5.1 description: @@ -142,7 +150,7 @@ requirements: - "apache-libcloud >= 0.13.3" notes: - Either I(name) or I(instance_names) is required. -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' @@ -287,6 +295,8 @@ def create_instances(module, gce, instance_names): ip_forward = module.params.get('ip_forward') external_ip = module.params.get('external_ip') disk_auto_delete = module.params.get('disk_auto_delete') + service_account_permissions = module.params.get('service_account_permissions') + service_account_email = module.params.get('service_account_email') if external_ip == "none": external_ip = None @@ -317,7 +327,7 @@ def create_instances(module, gce, instance_names): # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] 
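+    # literal_eval(str(metadata)) accepts the metadata whether Ansible has
+    # already parsed it into a dict or passed it through as a raw string;
+    # str() of a dict round-trips cleanly through literal_eval.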
if metadata: try: - md = literal_eval(metadata) + md = literal_eval(str(metadata)) if not isinstance(md, dict): raise ValueError('metadata must be a dict') except ValueError, e: @@ -330,6 +340,20 @@ def create_instances(module, gce, instance_names): items.append({"key": k,"value": v}) metadata = {'items': items} + ex_sa_perms = [] + bad_perms = [] + if service_account_permissions: + for perm in service_account_permissions: + if not perm in gce.SA_SCOPES_MAP.keys(): + bad_perms.append(perm) + if len(bad_perms) > 0: + module.fail_json(msg='bad permissions: %s' % str(bad_perms)) + if service_account_email: + ex_sa_perms.append({'email': service_account_email}) + else: + ex_sa_perms.append({'email': "default"}) + ex_sa_perms[0]['scopes'] = service_account_permissions + # These variables all have default values but check just in case if not lc_image or not lc_network or not lc_machine_type or not lc_zone: module.fail_json(msg='Missing required create instance variable', @@ -349,7 +373,7 @@ def create_instances(module, gce, instance_names): inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -437,6 +461,7 @@ def main(): tags = dict(type='list'), zone = dict(default='us-central1-a'), service_account_email = dict(), + service_account_permissions = dict(type='list'), pem_file = dict(), project_id = dict(), ip_forward = dict(type='bool', default=False), diff --git a/cloud/google/gce_lb.py b/cloud/google/gce_lb.py index df6f9d3d65f..6a264839e50 100644 --- a/cloud/google/gce_lb.py +++ b/cloud/google/gce_lb.py @@ -134,7 +134,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 079891c5e10..3ae1635ded7 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -75,7 +75,7 @@ options: aliases: [] state: description: - - desired state of the persistent disk + - desired state of the network or firewall required: false default: "present" choices: ["active", "present", "absent", "deleted"] @@ -105,7 +105,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' @@ -264,7 +264,7 @@ def main(): if fw: gce.ex_destroy_firewall(fw) changed = True - if name: + elif name: json_output['name'] = name network = None try: diff --git a/cloud/google/gce_pd.py b/cloud/google/gce_pd.py index 9e2e173c530..2d70c9b335a 100644 --- a/cloud/google/gce_pd.py +++ b/cloud/google/gce_pd.py @@ -120,7 +120,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/linode/linode.py b/cloud/linode/linode.py index dac22f7f2cb..9ebc770a47c 100644 --- a/cloud/linode/linode.py +++ b/cloud/linode/linode.py @@ -92,7 +92,7 @@ requirements: - "python >= 2.6" - "linode-python" - "pycurl" -author: Vincent Viallet +author: "Vincent Viallet (@zbal)" notes: - LINODE_API_KEY env variable can be used instead ''' diff --git a/cloud/openstack/README.md b/cloud/openstack/README.md new file mode 100644 
index 00000000000..4a872b11954
--- /dev/null
+++ b/cloud/openstack/README.md
@@ -0,0 +1,56 @@
+OpenStack Ansible Modules
+=========================
+
+This is a set of modules for interacting with OpenStack as either an admin
+or an end user. If the module does not begin with os_, it's either deprecated
+or soon to be. This document serves as developer coding guidelines for
+modules intended to be here.
+
+Naming
+------
+
+* All modules should start with os_
+* If the module is one that a cloud consumer would expect to use, it should be
+  named after the logical resource it manages. Thus, os\_server not os\_nova.
+  The reasoning for this is that some resources are managed by more than one
+  service, and which service manages them is a deployment detail. A good
+  example of this is floating IPs, which can come from either Nova or Neutron,
+  but which one they come from is immaterial to an end user.
+* If the module is one that a cloud admin would expect to use, it should be
+  named with the service and the resource, such as os\_keystone\_domain.
+* If the module is one that a cloud admin and a cloud consumer could both use,
+  the cloud consumer rules apply.
+
+Interface
+---------
+
+* If the resource being managed has an id, it should be returned.
+* If the resource being managed has an associated object more complex than
+  an id, it should also be returned.
+
+Interoperability
+----------------
+
+* It should be assumed that the cloud consumer does not know a bazillion
+  details about the deployment choices their cloud provider made, and a best
+  effort should be made to present one sane interface to the ansible user
+  regardless of deployer insanity.
+* All modules should work appropriately against all existing known public
+  OpenStack clouds.
+* It should be assumed that a user may have more than one cloud account that
+  they wish to combine as part of a single ansible managed infrastructure.
+
+Libraries
+---------
+
+* All modules should use openstack\_full\_argument\_spec to pick up the
+  standard input such as auth and ssl support.
+* All modules should use extends\_documentation\_fragment: openstack to go
+  along with openstack\_full\_argument\_spec.
+* All complex cloud interaction or interoperability code should be housed in
+  the [shade](http://git.openstack.org/cgit/openstack-infra/shade) library.
+* All OpenStack API interactions should happen via shade and not via
+  OpenStack Client libraries. The OpenStack Client libraries do not have end
+  users as a primary audience; they are for intra-server communication. The
+  python-openstacksdk is the future there, and shade will migrate to it when
+  it's ready, in a manner that is not noticeable to ansible users. (These
+  guidelines are illustrated by the module skeleton sketched below.)
diff --git a/cloud/openstack/glance_image.py b/cloud/openstack/_glance_image.py
similarity index 99%
rename from cloud/openstack/glance_image.py
rename to cloud/openstack/_glance_image.py
index 97b89f03484..0f2de791b38 100644
--- a/cloud/openstack/glance_image.py
+++ b/cloud/openstack/_glance_image.py
@@ -20,6 +20,7 @@ DOCUMENTATION = '''
 ---
 module: glance_image
 version_added: "1.2"
+deprecated: Deprecated in 1.10. Use os_image instead
 short_description: Add/Delete images from glance
 description:
   - Add or Remove images from the glance repository.
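
The deprecation above points users at os_image, one of the shade-based modules added later in this diff. To make the README guidelines concrete, here is a minimal sketch of the module skeleton they describe. It reuses only helpers that appear elsewhere in this patch (openstack_full_argument_spec, openstack_module_kwargs, shade.openstack_cloud, cloud.get_server); the module purpose and its lone name parameter are illustrative assumptions, not part of the patch:

#!/usr/bin/python
# Minimal os_-style module skeleton (illustrative sketch only).

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False


def main():
    # openstack_full_argument_spec() supplies the standard auth/ssl arguments
    # that the README asks every os_ module to accept.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
    )
    module = AnsibleModule(argument_spec, **openstack_module_kwargs())

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        # All cloud interaction goes through shade, never through the
        # individual OpenStack client libraries.
        cloud = shade.openstack_cloud(**module.params)
        server = cloud.get_server(module.params['name'])
        # Per the Interface guidelines: return the id and the full object.
        module.exit_json(changed=False,
                         id=server['id'] if server else None,
                         server=server)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
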
diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 97% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py index 686484cf37f..68df0c5a2c4 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -29,6 +29,10 @@ DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" +author: + - "Benno Joy (@bennojoy)" + - "Michael DeHaan" +deprecated: Deprecated in 2.0. Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . @@ -97,7 +101,7 @@ def main(): state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) - if not HAVE_NOVACLIENT: + if not HAS_NOVACLIENT: module.fail_json(msg='python-novaclient is required for this module to work') nova = nova_client.Client(module.params['login_username'], diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py similarity index 98% rename from cloud/openstack/quantum_floating_ip.py rename to cloud/openstack/_quantum_floating_ip.py index e89f23caa79..5220d307844 100644 --- a/cloud/openstack/quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -33,6 +33,10 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip version_added: "1.2" +author: + - "Benno Joy (@bennojoy)" + - "Brad P. Crochet (@bcrochet)" +deprecated: Deprecated in 2.0. Use os_floating_ip instead short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py similarity index 98% rename from cloud/openstack/quantum_floating_ip_associate.py rename to cloud/openstack/_quantum_floating_ip_associate.py index b7e9f71e5fd..8960e247b0f 100644 --- a/cloud/openstack/quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -32,6 +32,8 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip_associate version_added: "1.2" +author: "Benno Joy (@bennojoy)" +deprecated: Deprecated in 2.0. Use os_floating_ip instead short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/quantum_network.py b/cloud/openstack/_quantum_network.py similarity index 99% rename from cloud/openstack/quantum_network.py rename to cloud/openstack/_quantum_network.py index ff8b2683f37..93b10880823 100644 --- a/cloud/openstack/quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_network version_added: "1.4" +deprecated: Deprecated in 2.0. Use os_network instead short_description: Creates/Removes networks from OpenStack description: - Add or Remove network from OpenStack. 
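
One detail worth calling out in the nova_keypair hunk above: the HAVE_NOVACLIENT to HAS_NOVACLIENT change fixes a guard that, presumably, never matched the name assigned in the module's try/except import block, so the check itself would raise NameError instead of producing the intended message. A runnable distillation of that import-guard pattern (the bare novaclient import is a stand-in for whatever the module actually imports):

import sys

try:
    import novaclient                  # stand-in for the optional dependency
    HAS_NOVACLIENT = True
except ImportError:
    HAS_NOVACLIENT = False


def main():
    # The name tested here must be exactly the one assigned above; testing a
    # never-assigned name (e.g. HAVE_NOVACLIENT) raises NameError at runtime
    # instead of printing the friendly message.
    if not HAS_NOVACLIENT:
        sys.exit('python-novaclient is required for this module to work')


if __name__ == '__main__':
    main()
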
diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index 89afe53fbd4..de5eed598c7 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -75,7 +75,7 @@ options: requirements: - "python >= 2.6" - python-keystoneclient -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' diff --git a/cloud/openstack/os_auth.py b/cloud/openstack/os_auth.py index ec0e8414fd2..a881c217805 100644 --- a/cloud/openstack/os_auth.py +++ b/cloud/openstack/os_auth.py @@ -27,6 +27,7 @@ DOCUMENTATION = ''' module: os_auth short_description: Retrieve an auth token version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Retrieve an auth token from an OpenStack Cloud requirements: diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py new file mode 100644 index 00000000000..7128b06ffcb --- /dev/null +++ b/cloud/openstack/os_client_config.py @@ -0,0 +1,74 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import os_client_config +from os_client_config import exceptions + +DOCUMENTATION = ''' +--- +module: os_client_config +short_description: Get OpenStack Client config +description: + - Get I(openstack) client config data from clouds.yaml or environment +version_added: "2.0" +notes: + - Facts are placed in the C(openstack.clouds) variable. +options: + clouds: + description: + - List of clouds to limit the return list to. No value means return + information on all configured clouds + required: false + default: [] +requirements: [ os-client-config ] +author: "Monty Taylor (@emonty)" +''' + +EXAMPLES = ''' +# Get list of clouds that do not support security groups +- os-client-config: +- debug: var={{ item }} + with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" + +# Get the information back just about the mordred cloud +- os-client-config: + clouds: + - mordred +''' + + +def main(): + module = AnsibleModule(argument_spec=dict( + clouds=dict(required=False, default=[]), + )) + p = module.params + + try: + config = os_client_config.OpenStackConfig() + clouds = [] + for cloud in config.get_all_clouds(): + if not p['clouds'] or cloud.name in p['clouds']: + cloud.config['name'] = cloud.name + clouds.append(cloud.config) + module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) + except exceptions.OpenStackConfigException as e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py new file mode 100644 index 00000000000..10827012ae8 --- /dev/null +++ b/cloud/openstack/os_floating_ip.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
+# Author: Davide Guerri
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+    import shade
+    from shade import meta
+
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_floating_ip
+version_added: "2.0"
+short_description: Add/Remove floating IP from an instance
+extends_documentation_fragment: openstack
+description:
+   - Add or Remove a floating IP to an instance
+options:
+   server:
+     description:
+       - The name or ID of the instance to which the IP address
+         should be assigned.
+     required: true
+   network:
+     description:
+       - The name or ID of a neutron external network or a nova pool name.
+     required: false
+   floating_ip_address:
+     description:
+       - A floating IP address to attach or to detach. Required only if state
+         is absent. When state is present it can be used to specify an IP
+         address to attach.
+     required: false
+   reuse:
+     description:
+       - When state is present, and floating_ip_address is not present,
+         this parameter can be used to specify whether we should try to reuse
+         a floating IP address already allocated to the project.
+     required: false
+     default: false
+   fixed_address:
+     description:
+       - The fixed IP of the server that the floating IP address should be
+         attached to.
+     required: false
+   wait:
+     description:
+       - When attaching a floating IP address, specify whether we should
+         wait for it to appear as attached.
+     required: false
+     default: false
+   timeout:
+     description:
+       - Time to wait for an IP address to appear as attached. See wait.
+     required: false
+     default: 60
+   state:
+     description:
+       - Should the resource be present or absent.
+     choices: [present, absent]
+     required: false
+     default: present
+requirements: ["shade"]
+'''
+
+EXAMPLES = '''
+# Assign a floating IP to the first interface of `cattle001` from an existing
+# external network or nova pool. A new floating IP from the first available
+# external network is allocated to the project.
+- os_floating_ip:
+    cloud: dguerri
+    server: cattle001
+
+# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
+# `cattle001`. If a free floating IP is already allocated to the project, it is
+# reused; if not, a new one is created.
+- os_floating_ip:
+    cloud: dguerri
+    state: present
+    reuse: yes
+    server: cattle001
+    network: ext_net
+    fixed_address: 192.0.2.3
+    wait: true
+    timeout: 180
+
+# Detach a floating IP address from a server
+- os_floating_ip:
+    cloud: dguerri
+    state: absent
+    floating_ip_address: 203.0.113.2
+    server: cattle001
+'''
+
+
+def _get_floating_ip(cloud, floating_ip_address):
+    f_ips = cloud.search_floating_ips(
+        filters={'floating_ip_address': floating_ip_address})
+    if not f_ips:
+        return None
+
+    return f_ips[0]
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        server=dict(required=True),
+        state=dict(default='present', choices=['absent', 'present']),
+        network=dict(required=False),
+        floating_ip_address=dict(required=False),
+        reuse=dict(required=False, type='bool', default=False),
+        fixed_address=dict(required=False),
+        wait=dict(required=False, type='bool', default=False),
+        timeout=dict(required=False, type='int', default=60),
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    server_name_or_id = module.params['server']
+    state = module.params['state']
+    network = module.params['network']
+    floating_ip_address = module.params['floating_ip_address']
+    reuse = module.params['reuse']
+    fixed_address = module.params['fixed_address']
+    wait = module.params['wait']
+    timeout = module.params['timeout']
+
+    cloud = shade.openstack_cloud(**module.params)
+
+    try:
+        server = cloud.get_server(server_name_or_id)
+        if server is None:
+            module.fail_json(
+                msg="server {0} not found".format(server_name_or_id))
+
+        if state == 'present':
+            if floating_ip_address is None:
+                if reuse:
+                    f_ip = cloud.available_floating_ip(network=network)
+                else:
+                    f_ip = cloud.create_floating_ip(network=network)
+            else:
+                f_ip = _get_floating_ip(cloud, floating_ip_address)
+                if f_ip is None:
+                    module.fail_json(
+                        msg="floating IP {0} not found".format(
+                            floating_ip_address))
+
+            cloud.attach_ip_to_server(
+                server_id=server['id'], floating_ip_id=f_ip['id'],
+                fixed_address=fixed_address, wait=wait, timeout=timeout)
+            # Update the floating IP status
+            f_ip = cloud.get_floating_ip(id=f_ip['id'])
+            module.exit_json(changed=True, floating_ip=f_ip)
+
+        elif state == 'absent':
+            if floating_ip_address is None:
+                module.fail_json(msg="floating_ip_address is required")
+
+            f_ip = _get_floating_ip(cloud, floating_ip_address)
+            if f_ip is None:
+                # Address is not allocated to the project; nothing to detach.
+                module.exit_json(changed=False)
+
+            cloud.detach_ip_from_server(
+                server_id=server['id'], floating_ip_id=f_ip['id'])
+            # Update the floating IP status
+            f_ip = cloud.get_floating_ip(id=f_ip['id'])
+            module.exit_json(changed=True, floating_ip=f_ip)
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=e.message, extra_data=e.extra_data)
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py
new file mode 100644
index 00000000000..4687ce5e972
--- /dev/null
+++ b/cloud/openstack/os_image.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2013, Benno Joy
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+
+DOCUMENTATION = '''
+---
+module: os_image
+short_description: Add/Delete images from OpenStack Cloud
+extends_documentation_fragment: openstack
+version_added: "2.0"
+author: "Monty Taylor (@emonty)"
+description:
+   - Add or Remove images from the OpenStack Image Repository
+options:
+   name:
+     description:
+        - Name that has to be given to the image
+     required: true
+     default: None
+   disk_format:
+     description:
+        - The format of the disk that is getting uploaded
+     required: false
+     default: qcow2
+   container_format:
+     description:
+        - The format of the container
+     required: false
+     default: bare
+   owner:
+     description:
+        - The owner of the image
+     required: false
+     default: None
+   min_disk:
+     description:
+        - The minimum disk space required to deploy this image
+     required: false
+     default: None
+   min_ram:
+     description:
+        - The minimum RAM required to deploy this image
+     required: false
+     default: None
+   is_public:
+     description:
+        - Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
+     required: false
+     default: 'yes'
+   filename:
+     description:
+        - The path to the file which has to be uploaded
+     required: false
+     default: None
+   ramdisk:
+     description:
+        - The name of an existing ramdisk image that will be associated with this image
+     required: false
+     default: None
+   kernel:
+     description:
+        - The name of an existing kernel image that will be associated with this image
+     required: false
+     default: None
+   properties:
+     description:
+        - Additional properties to be associated with this image
+     required: false
+     default: {}
+   state:
+     description:
+       - Should the resource be present or absent.
+ choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img +- os_image: + auth: + auth_url: http://localhost/auth/v2.0 + username: admin + password: passme + project_name: admin + name: cirros + container_format: bare + disk_format: qcow2 + state: present + filename: cirros-0.3.0-x86_64-disk.img + kernel: cirros-vmlinuz + ramdisk: cirros-initrd + properties: + cpu_arch: x86_64 + distro: ubuntu +''' + + +def main(): + + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), + container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']), + owner = dict(default=None), + min_disk = dict(default=None), + min_ram = dict(default=None), + is_public = dict(default=False), + filename = dict(default=None), + ramdisk = dict(default=None), + kernel = dict(default=None), + properties = dict(default={}), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + try: + cloud = shade.openstack_cloud(**module.params) + + changed = False + image = cloud.get_image(name_or_id=module.params['name']) + + if module.params['state'] == 'present': + if not image: + image = cloud.create_image( + name=module.params['name'], + filename=module.params['filename'], + disk_format=module.params['disk_format'], + container_format=module.params['container_format'], + wait=module.params['wait'], + timeout=module.params['timeout'] + ) + changed = True + if not module.params['wait']: + module.exit_json(changed=changed, image=image, id=image.id) + + cloud.update_image_properties( + image=image, + kernel=module.params['kernel'], + ramdisk=module.params['ramdisk'], + **module.params['properties']) + image = cloud.get_image(name_or_id=image.id) + module.exit_json(changed=changed, image=image, id=image.id) + + elif module.params['state'] == 'absent': + if not image: + changed = False + else: + cloud.delete_image( + name_or_id=module.params['name'], + wait=module.params['wait'], + timeout=module.params['timeout']) + changed = True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message, extra_data=e.extra_data) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py new file mode 100644 index 00000000000..0ec4366b79f --- /dev/null +++ b/cloud/openstack/os_ironic.py @@ -0,0 +1,353 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2014, Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +import jsonpatch +DOCUMENTATION = ''' +--- +module: os_ironic +short_description: Create/Delete Bare Metal Resources from OpenStack +extends_documentation_fragment: openstack +author: "Monty Taylor (@emonty)" +version_added: "2.0" +description: + - Create or Remove Ironic nodes from OpenStack. +options: + state: + description: + - Indicates desired state of the resource + choices: ['present', 'absent'] + default: present + uuid: + description: + - globally unique identifier (UUID) to be given to the resource. Will + be auto-generated if not specified, and name is specified. + - Definition of a UUID will always take precedence to a name value. + required: false + default: None + name: + description: + - unique name identifier to be given to the resource. + required: false + default: None + driver: + description: + - The name of the Ironic Driver to use with this node. + required: true + default: None + chassis_uuid: + description: + - Associate the node with a pre-defined chassis. + required: false + default: None + ironic_url: + description: + - If noauth mode is utilized, this is required to be set to the + endpoint URL for the Ironic API. Use with "auth" and "auth_type" + settings set to None. + required: false + default: None + driver_info: + description: + - Information for this server's driver. Will vary based on which + driver is in use. Any sub-field which is populated will be validated + during creation. + suboptions: + power: + description: + - Information necessary to turn this server on / off. + This often includes such things as IPMI username, password, and IP address. + required: true + deploy: + description: + - Information necessary to deploy this server directly, without using Nova. THIS IS NOT RECOMMENDED. + console: + description: + - Information necessary to connect to this server's serial console. Not all drivers support this. + management: + description: + - Information necessary to interact with this server's management interface. May be shared by power_info in some cases. + required: true + nics: + description: + - 'A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc"' + required: true + properties: + description: + - Definition of the physical characteristics of this server, used for scheduling purposes + suboptions: + cpu_arch: + description: + - CPU architecture (x86_64, i686, ...) + default: x86_64 + cpus: + description: + - Number of CPU cores this machine has + default: 1 + ram: + description: + - amount of RAM this machine has, in MB + default: 1 + disk_size: + description: + - size of first storage device in this machine (typically /dev/sda), in GB + default: 1 + skip_update_of_driver_password: + description: + - Allows the code that would assert changes to nodes to skip the + update if the change is a single line consisting of the password + field. As of Kilo, by default, passwords are always masked to API + requests, which means the logic as a result always attempts to + re-assert the password field. 
+     required: false
+     default: false
+
+requirements: ["shade", "jsonpatch"]
+'''
+
+EXAMPLES = '''
+# Enroll a node with some basic properties and driver info
+- os_ironic:
+    cloud: "devstack"
+    driver: "pxe_ipmitool"
+    uuid: "00000000-0000-0000-0000-000000000002"
+    properties:
+      cpus: 2
+      cpu_arch: "x86_64"
+      ram: 8192
+      disk_size: 64
+    nics:
+      - mac: "aa:bb:cc:aa:bb:cc"
+      - mac: "dd:ee:ff:dd:ee:ff"
+    driver_info:
+      power:
+        ipmi_address: "1.2.3.4"
+        ipmi_username: "admin"
+        ipmi_password: "adminpass"
+    chassis_uuid: "00000000-0000-0000-0000-000000000001"
+
+'''
+
+
+def _parse_properties(module):
+    p = module.params['properties']
+    props = dict(
+        cpu_arch=p.get('cpu_arch') if p.get('cpu_arch') else 'x86_64',
+        cpus=p.get('cpus') if p.get('cpus') else 1,
+        memory_mb=p.get('ram') if p.get('ram') else 1,
+        local_gb=p.get('disk_size') if p.get('disk_size') else 1,
+    )
+    return props
+
+
+def _parse_driver_info(module):
+    p = module.params['driver_info']
+    info = p.get('power')
+    if not info:
+        raise shade.OpenStackCloudException(
+            "driver_info['power'] is required")
+    if p.get('console'):
+        info.update(p.get('console'))
+    if p.get('management'):
+        info.update(p.get('management'))
+    if p.get('deploy'):
+        info.update(p.get('deploy'))
+    return info
+
+
+def _choose_id_value(module):
+    if module.params['uuid']:
+        return module.params['uuid']
+    if module.params['name']:
+        return module.params['name']
+    return None
+
+
+def _is_value_true(value):
+    true_values = [True, 'yes', 'Yes', 'True', 'true']
+    if value in true_values:
+        return True
+    return False
+
+
+def _choose_if_password_only(module, patch):
+    if len(patch) == 1:
+        if 'password' in patch[0]['path'] and _is_value_true(
+                module.params['skip_update_of_masked_password']):
+            # Return False to abort the update, as the password appears
+            # to be the only element in the patch.
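+            # An illustrative single-element patch (hypothetical values):
+            #   [{'op': 'replace', 'path': '/driver_info/ipmi_password',
+            #     'value': '******'}]
+            # i.e. the masked password is the only difference detected.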
+ return False + return True + + +def _exit_node_not_updated(module, server): + module.exit_json( + changed=False, + result="Node not updated", + uuid=server['uuid'], + provision_state=server['provision_state'] + ) + + +def main(): + argument_spec = openstack_full_argument_spec( + uuid=dict(required=False), + name=dict(required=False), + driver=dict(required=False), + driver_info=dict(type='dict', required=True), + nics=dict(type='list', required=True), + properties=dict(type='dict', default={}), + ironic_url=dict(required=False), + chassis_uuid=dict(required=False), + skip_update_of_masked_password=dict(required=False, choices=BOOLEANS), + state=dict(required=False, default='present') + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + if (module.params['auth_type'] in [None, 'None'] and + module.params['ironic_url'] is None): + module.fail_json(msg="Authentication appears to be disabled, " + "Please define an ironic_url parameter") + + if (module.params['ironic_url'] and + module.params['auth_type'] in [None, 'None']): + module.params['auth'] = dict( + endpoint=module.params['ironic_url'] + ) + + node_id = _choose_id_value(module) + + try: + cloud = shade.operator_cloud(**module.params) + server = cloud.get_machine(node_id) + if module.params['state'] == 'present': + if module.params['driver'] is None: + module.fail_json(msg="A driver must be defined in order " + "to set a node to present.") + + properties = _parse_properties(module) + driver_info = _parse_driver_info(module) + kwargs = dict( + driver=module.params['driver'], + properties=properties, + driver_info=driver_info, + name=module.params['name'], + ) + + if module.params['chassis_uuid']: + kwargs['chassis_uuid'] = module.params['chassis_uuid'] + + if server is None: + # Note(TheJulia): Add a specific UUID to the request if + # present in order to be able to re-use kwargs for if + # the node already exists logic, since uuid cannot be + # updated. + if module.params['uuid']: + kwargs['uuid'] = module.params['uuid'] + + server = cloud.register_machine(module.params['nics'], + **kwargs) + module.exit_json(changed=True, uuid=server['uuid'], + provision_state=server['provision_state']) + else: + # TODO(TheJulia): Presently this does not support updating + # nics. Support needs to be added. + # + # Note(TheJulia): This message should never get logged + # however we cannot realistically proceed if neither a + # name or uuid was supplied to begin with. + if not node_id: + module.fail_json(msg="A uuid or name value " + "must be defined") + + # Note(TheJulia): Constructing the configuration to compare + # against. The items listed in the server_config block can + # be updated via the API. + + server_config = dict( + driver=server['driver'], + properties=server['properties'], + driver_info=server['driver_info'], + name=server['name'], + ) + + # Add the pre-existing chassis_uuid only if + # it is present in the server configuration. + if hasattr(server, 'chassis_uuid'): + server_config['chassis_uuid'] = server['chassis_uuid'] + + # Note(TheJulia): If a password is defined and concealed, a + # patch will always be generated and re-asserted. 
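+            # jsonpatch.JsonPatch.from_diff(src, dst) yields RFC 6902
+            # operations, for example (illustrative values only):
+            #   [{'op': 'replace', 'path': '/driver_info/ipmi_address',
+            #     'value': '10.0.0.5'}]
+            # An empty patch means the node already matches the request.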
+ patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs) + + if not patch: + _exit_node_not_updated(module, server) + elif _choose_if_password_only(module, list(patch)): + # Note(TheJulia): Normally we would allow the general + # exception catch below, however this allows a specific + # message. + try: + server = cloud.patch_machine( + server['uuid'], + list(patch)) + except Exception as e: + module.fail_json(msg="Failed to update node, " + "Error: %s" % e.message) + + # Enumerate out a list of changed paths. + change_list = [] + for change in list(patch): + change_list.append(change['path']) + module.exit_json(changed=True, + result="Node Updated", + changes=change_list, + uuid=server['uuid'], + provision_state=server['provision_state']) + + # Return not updated by default as the conditions were not met + # to update. + _exit_node_not_updated(module, server) + + if module.params['state'] == 'absent': + if not node_id: + module.fail_json(msg="A uuid or name value must be defined " + "in order to remove a node.") + + if server is not None: + cloud.unregister_machine(module.params['nics'], + server['uuid']) + module.exit_json(changed=True, result="deleted") + else: + module.exit_json(changed=False, result="Server not found") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py new file mode 100644 index 00000000000..f087581ca0a --- /dev/null +++ b/cloud/openstack/os_ironic_node.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2015, Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_ironic_node +short_description: Activate/Deactivate Bare Metal Resources from OpenStack +author: "Monty Taylor (@emonty)" +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Deploy to nodes controlled by Ironic. +options: + state: + description: + - Indicates desired state of the resource + choices: ['present', 'absent'] + default: present + deploy: + description: + - Indicates if the resource should be deployed. Allows for deployment + logic to be disengaged and control of the node power or maintenance + state to be changed. + choices: ['true', 'false'] + default: true + uuid: + description: + - globally unique identifier (UUID) to be given to the resource. + required: false + default: None + ironic_url: + description: + - If noauth mode is utilized, this is required to be set to the + endpoint URL for the Ironic API. Use with "auth" and "auth_type" + settings set to None. 
+ required: false + default: None + config_drive: + description: + - A configdrive file or HTTP(S) URL that will be passed along to the + node. + required: false + default: None + instance_info: + description: + - Definition of the instance information which is used to deploy + the node. This information is only required when an instance is + set to present. + suboptions: + image_source: + description: + - An HTTP(S) URL where the image can be retrieved from. + image_checksum: + description: + - The checksum of image_source. + image_disk_format: + description: + - The type of image that has been requested to be deployed. + power: + description: + - A setting to allow power state to be asserted allowing nodes + that are not yet deployed to be powered on, and nodes that + are deployed to be powered off. + choices: ['present', 'absent'] + default: present + maintenance: + description: + - A setting to allow the direct control if a node is in + maintenance mode. + required: false + default: false + maintenance_reason: + description: + - A string expression regarding the reason a node is in a + maintenance mode. + required: false + default: None +''' + +EXAMPLES = ''' +# Activate a node by booting an image with a configdrive attached +os_ironic_node: + cloud: "openstack" + uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69" + state: present + power: present + deploy: True + maintenance: False + config_drive: "http://192.168.1.1/host-configdrive.iso" + instance_info: + image_source: "http://192.168.1.1/deploy_image.img" + image_checksum: "356a6b55ecc511a20c33c946c4e678af" + image_disk_format: "qcow" + delegate_to: localhost +''' + + +def _choose_id_value(module): + if module.params['uuid']: + return module.params['uuid'] + if module.params['name']: + return module.params['name'] + return None + + +# TODO(TheJulia): Change this over to use the machine patch method +# in shade once it is available. +def _prepare_instance_info_patch(instance_info): + patch = [] + patch.append({ + 'op': 'replace', + 'path': '/instance_info', + 'value': instance_info + }) + return patch + + +def _is_true(value): + true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on'] + if value in true_values: + return True + return False + + +def _is_false(value): + false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off'] + if value in false_values: + return True + return False + + +def _check_set_maintenance(module, cloud, node): + if _is_true(module.params['maintenance']): + if _is_false(node['maintenance']): + cloud.set_machine_maintenance_state( + node['uuid'], + True, + reason=module.params['maintenance_reason']) + module.exit_json(changed=True, msg="Node has been set into " + "maintenance mode") + else: + # User has requested maintenance state, node is already in the + # desired state, checking to see if the reason has changed. 
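+            # NOTE: this is a substring comparison, so a stored reason
+            # already contained in the requested reason is treated as
+            # unchanged; anything else re-asserts maintenance mode with
+            # the new reason.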
+        if (str(node['maintenance_reason']) not in
+                str(module.params['maintenance_reason'])):
+            cloud.set_machine_maintenance_state(
+                node['uuid'],
+                True,
+                reason=module.params['maintenance_reason'])
+            module.exit_json(changed=True, msg="Node maintenance reason "
+                                               "updated, cannot take any "
+                                               "additional action.")
+    elif _is_false(module.params['maintenance']):
+        if node['maintenance'] is True:
+            cloud.remove_machine_from_maintenance(node['uuid'])
+            return True
+    else:
+        module.fail_json(msg="maintenance parameter was set but the value "
+                             "was not recognized.")
+    return False
+
+
+def _check_set_power_state(module, cloud, node):
+    if 'power on' in str(node['power_state']):
+        if _is_false(module.params['power']):
+            # User has requested the node be powered off.
+            cloud.set_machine_power_off(node['uuid'])
+            module.exit_json(changed=True, msg="Power requested off")
+    if 'power off' in str(node['power_state']):
+        if (_is_false(module.params['power']) and
+                _is_false(module.params['state'])):
+            return False
+        if (_is_false(module.params['power']) and
+                _is_true(module.params['state'])):
+            module.exit_json(
+                changed=False,
+                msg="Power for node is %s, node must be reactivated "
+                    "OR set to state absent" % node['power_state']
+            )
+        # In the event the power has been toggled on and
+        # deployment has been requested, we need to skip this
+        # step.
+        if (_is_true(module.params['power']) and
+                _is_false(module.params['deploy'])):
+            # Node is powered down when it is not awaiting to be provisioned
+            cloud.set_machine_power_on(node['uuid'])
+            return True
+    # Default False if no action has been taken.
+    return False
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        uuid=dict(required=False),
+        name=dict(required=False),
+        instance_info=dict(type='dict', required=False),
+        config_drive=dict(required=False),
+        ironic_url=dict(required=False),
+        state=dict(required=False, default='present'),
+        maintenance=dict(required=False),
+        maintenance_reason=dict(required=False),
+        power=dict(required=False, default='present'),
+        deploy=dict(required=False, default=True),
+    )
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+    if (module.params['auth_type'] in [None, 'None'] and
+            module.params['ironic_url'] is None):
+        module.fail_json(msg="Authentication appears disabled, please "
+                             "define an ironic_url parameter")
+
+    if (module.params['ironic_url'] and
+            module.params['auth_type'] in [None, 'None']):
+        module.params['auth'] = dict(
+            endpoint=module.params['ironic_url']
+        )
+
+    node_id = _choose_id_value(module)
+
+    if not node_id:
+        module.fail_json(msg="A uuid or name value must be defined "
+                             "to use this module.")
+    try:
+        cloud = shade.operator_cloud(**module.params)
+        node = cloud.get_machine(node_id)
+
+        if node is None:
+            module.fail_json(msg="node not found")
+
+        uuid = node['uuid']
+        instance_info = module.params['instance_info']
+        changed = False
+
+        # User has requested the desired state to be maintenance.
+        if module.params['state'] == 'maintenance':
+            module.params['maintenance'] = True
+
+        if node['provision_state'] in [
+                'cleaning',
+                'deleting',
+                'wait call-back']:
+            module.fail_json(msg="Node is in %s state, cannot act upon the "
+                                 "request as the node is in a transition "
+                                 "state" % node['provision_state'])
+        # TODO(TheJulia) This is in-development code, that requires
+        # code in the shade library that is still in development.
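+        # Order of operations below: maintenance state is asserted first,
+        # then power state, then (de)provisioning; each helper may call
+        # module.exit_json() itself and end the run early.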
+ if _check_set_maintenance(module, cloud, node): + if node['provision_state'] in 'active': + module.exit_json(changed=True, + result="Maintenance state changed") + changed = True + node = cloud.get_machine(node_id) + + if _check_set_power_state(module, cloud, node): + changed = True + node = cloud.get_machine(node_id) + + if _is_true(module.params['state']): + if _is_false(module.params['deploy']): + module.exit_json( + changed=changed, + result="User request has explicitly disabled " + "deployment logic" + ) + + if 'active' in node['provision_state']: + module.exit_json( + changed=changed, + result="Node already in an active state." + ) + + if instance_info is None: + module.fail_json( + changed=changed, + msg="When setting an instance to present, " + "instance_info is a required variable.") + + # TODO(TheJulia): Update instance info, however info is + # deployment specific. Perhaps consider adding rebuild + # support, although there is a known desire to remove + # rebuild support from Ironic at some point in the future. + patch = _prepare_instance_info_patch(instance_info) + cloud.set_node_instance_info(uuid, patch) + cloud.validate_node(uuid) + cloud.activate_node(uuid, module.params['config_drive']) + # TODO(TheJulia): Add more error checking and a wait option. + # We will need to loop, or just add the logic to shade, + # although this could be a very long running process as + # baremetal deployments are not a "quick" task. + module.exit_json(changed=changed, result="node activated") + + elif _is_false(module.params['state']): + if node['provision_state'] not in "deleted": + cloud.purge_node_instance_info(uuid) + cloud.deactivate_node(uuid) + module.exit_json(changed=True, result="deleted") + else: + module.exit_json(changed=False, result="node not found") + else: + module.fail_json(msg="State must be present, absent, " + "maintenance, off") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py new file mode 100644 index 00000000000..f62cc51bf64 --- /dev/null +++ b/cloud/openstack/os_keypair.py @@ -0,0 +1,167 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# Copyright (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
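+#
+# NOTE: in main() below a keypair counts as unchanged only when both the
+# name and the public key match; a same-named keypair whose public key
+# differs makes the module fail so the old key can be deleted first.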
+ + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_keypair +short_description: Add/Delete a keypair from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove key pair from OpenStack +options: + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected into VMs + upon creation. + required: false + default: None + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive + with public_key. + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: [] +''' + +EXAMPLES = ''' +# Creates a key pair with the running users public key +- os_keypair: + cloud: mordred + state: present + name: ansible_key + public_key_file: /home/me/.ssh/id_rsa.pub + +# Creates a new key pair and the private key returned after the run. +- os_keypair: + cloud: rax-dfw + state: present + name: ansible_key +''' + +RETURN = ''' +id: + description: Unique UUID. + returned: success + type: string +name: + description: Name given to the keypair. + returned: success + type: string +public_key: + description: The public key value for the keypair. + returned: success + type: string +private_key: + description: The private key value for the keypair. + returned: Only when a keypair is generated for the user (e.g., when creating one + and a public key is not specified). + type: string +''' + + +def _system_state_change(module, keypair): + state = module.params['state'] + if state == 'present' and not keypair: + return True + if state == 'absent' and keypair: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + public_key = dict(default=None), + public_key_file = dict(default=None), + state = dict(default='present', + choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[['public_key', 'public_key_file']]) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + public_key = module.params['public_key'] + + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + public_key = public_key.rstrip() + + try: + cloud = shade.openstack_cloud(**module.params) + keypair = cloud.get_keypair(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, keypair)) + + if state == 'present': + if keypair and keypair['name'] == name: + if public_key and (public_key != keypair['public_key']): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." 
% name + ) + else: + module.exit_json(changed=False, key=keypair) + + new_key = cloud.create_keypair(name, public_key) + module.exit_json(changed=True, key=new_key) + + elif state == 'absent': + if keypair: + cloud.delete_keypair(name) + module.exit_json(changed=True) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py new file mode 100644 index 00000000000..75c431493f6 --- /dev/null +++ b/cloud/openstack/os_network.py @@ -0,0 +1,107 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_network +short_description: Creates/Removes networks from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +author: "Monty Taylor (@emonty)" +description: + - Add or Remove network from OpenStack. +options: + name: + description: + - Name to be assigned to the network. + required: true + shared: + description: + - Whether this network is shared or not. + required: false + default: false + admin_state_up: + description: + - Whether the state should be marked as up or down. + required: false + default: true + state: + description: + - Indicate desired state of the resource. 
+     choices: ['present', 'absent']
+     required: false
+     default: present
+requirements: ["shade"]
+'''
+
+EXAMPLES = '''
+- os_network:
+    name=t1network
+    state=present
+'''
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=True),
+        shared=dict(default=False, type='bool'),
+        admin_state_up=dict(default=True, type='bool'),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    state = module.params['state']
+    name = module.params['name']
+    shared = module.params['shared']
+    admin_state_up = module.params['admin_state_up']
+
+    try:
+        cloud = shade.openstack_cloud(**module.params)
+        net = cloud.get_network(name)
+
+        if state == 'present':
+            if not net:
+                net = cloud.create_network(name, shared, admin_state_up)
+                changed = True
+            else:
+                changed = False
+            module.exit_json(changed=changed, network=net, id=net['id'])
+
+        elif state == 'absent':
+            if not net:
+                module.exit_json(changed=False)
+            else:
+                cloud.delete_network(name)
+                module.exit_json(changed=True)
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=e.message)
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+main()
diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py
new file mode 100644
index 00000000000..82b3a53aa3d
--- /dev/null
+++ b/cloud/openstack/os_nova_flavor.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_nova_flavor
+short_description: Manage OpenStack compute flavors
+extends_documentation_fragment: openstack
+version_added: "2.0"
+author: "David Shrewsbury (@Shrews)"
+description:
+   - Add or remove flavors from OpenStack.
+options:
+   state:
+     description:
+        - Indicate desired state of the resource. When I(state) is 'present',
+          then I(ram), I(vcpus), and I(disk) are all required. There are no
+          default values for those parameters.
+     choices: ['present', 'absent']
+     required: false
+     default: present
+   name:
+     description:
+        - Flavor name.
+     required: true
+   ram:
+     description:
+        - Amount of memory, in MB.
+     required: false
+     default: null
+   vcpus:
+     description:
+        - Number of virtual CPUs.
+     required: false
+     default: null
+   disk:
+     description:
+        - Size of local disk, in GB.
+     required: false
+     default: null
+   ephemeral:
+     description:
+        - Ephemeral space size, in GB.
+     required: false
+     default: 0
+   swap:
+     description:
+        - Swap space size, in MB.
+     required: false
+     default: 0
+   rxtx_factor:
+     description:
+        - RX/TX factor.
+ required: false + default: 1.0 + is_public: + description: + - Make flavor accessible to the public. + required: false + default: true + flavorid: + description: + - ID for the flavor. This is optional as a unique UUID will be + assigned if a value is not specified. + required: false + default: "auto" +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of +# local disk, and 10GB of ephemeral. +- os_nova_flavor: + cloud=mycloud + state=present + name=tiny + ram=1024 + vcpus=1 + disk=10 + ephemeral=10 + +# Delete 'tiny' flavor +- os_nova_flavor: + cloud=mycloud + state=absent + name=tiny +''' + +RETURN = ''' +flavor: + description: Dictionary describing the flavor. + returned: On success when I(state) is 'present' + type: dictionary + contains: + id: + description: Flavor ID. + returned: success + type: string + sample: "515256b8-7027-4d73-aa54-4e30a4a4a339" + name: + description: Flavor name. + returned: success + type: string + sample: "tiny" + disk: + description: Size of local disk, in GB. + returned: success + type: int + sample: 10 + ephemeral: + description: Ephemeral space size, in GB. + returned: success + type: int + sample: 10 + ram: + description: Amount of memory, in MB. + returned: success + type: int + sample: 1024 + swap: + description: Swap space size, in MB. + returned: success + type: int + sample: 100 + vcpus: + description: Number of virtual CPUs. + returned: success + type: int + sample: 2 + is_public: + description: Make flavor accessible to the public. + returned: success + type: bool + sample: true +''' + + +def _system_state_change(module, flavor): + state = module.params['state'] + if state == 'present' and not flavor: + return True + if state == 'absent' and flavor: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + state = dict(required=False, default='present', + choices=['absent', 'present']), + name = dict(required=False), + + # required when state is 'present' + ram = dict(required=False, type='int'), + vcpus = dict(required=False, type='int'), + disk = dict(required=False, type='int'), + + ephemeral = dict(required=False, default=0, type='int'), + swap = dict(required=False, default=0, type='int'), + rxtx_factor = dict(required=False, default=1.0, type='float'), + is_public = dict(required=False, default=True, type='bool'), + flavorid = dict(required=False, default="auto"), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['ram', 'vcpus', 'disk']) + ], + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + + try: + cloud = shade.operator_cloud(**module.params) + flavor = cloud.get_flavor(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, flavor)) + + if state == 'present': + if not flavor: + flavor = cloud.create_flavor( + name=name, + ram=module.params['ram'], + vcpus=module.params['vcpus'], + disk=module.params['disk'], + flavorid=module.params['flavorid'], + ephemeral=module.params['ephemeral'], + swap=module.params['swap'], + rxtx_factor=module.params['rxtx_factor'], + is_public=module.params['is_public'] + ) + module.exit_json(changed=True, flavor=flavor) + module.exit_json(changed=False, flavor=flavor) + + elif state == 'absent': + if flavor: + cloud.delete_flavor(name) + 
module.exit_json(changed=True) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py new file mode 100644 index 00000000000..a009d913a8a --- /dev/null +++ b/cloud/openstack/os_object.py @@ -0,0 +1,125 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_object +short_description: Create or Delete objects and containers from OpenStack +version_added: "2.0" +author: "Monty Taylor (@emonty)" +extends_documentation_fragment: openstack +description: + - Create or Delete objects and containers from OpenStack +options: + container: + description: + - The name of the container in which to create the object + required: true + name: + description: + - Name to be give to the object. If omitted, operations will be on + the entire container + required: false + file: + description: + - Path to local file to be uploaded. + required: false + container_access: + description: + - desired container access level. + required: false + choices: ['private', 'public'] + default: private + state: + description: + - Should the resource be present or absent. 
+     choices: [present, absent]
+     default: present
+'''
+
+EXAMPLES = '''
+# Creates an object named 'fstab' in the 'config' container
+- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab
+
+# Deletes a container called config and all of its contents
+- os_object: cloud=rax-iad state=absent container=config
+'''
+
+
+def process_object(
+        cloud_obj, container, name, filename, container_access, **kwargs):
+
+    changed = False
+    container_obj = cloud_obj.get_container(container)
+    if kwargs['state'] == 'present':
+        if not container_obj:
+            container_obj = cloud_obj.create_container(container)
+            changed = True
+        if cloud_obj.get_container_access(container) != container_access:
+            cloud_obj.set_container_access(container, container_access)
+            changed = True
+        if name:
+            if cloud_obj.is_object_stale(container, name, filename):
+                cloud_obj.create_object(container, name, filename)
+                changed = True
+    else:
+        if container_obj:
+            if name:
+                if cloud_obj.get_object_metadata(container, name):
+                    cloud_obj.delete_object(container, name)
+                    changed = True
+            else:
+                cloud_obj.delete_container(container)
+                changed = True
+    return changed
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=False, default=None),
+        container=dict(required=True),
+        filename=dict(required=False, default=None),
+        container_access=dict(default='private', choices=['private', 'public']),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec, **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    try:
+        cloud = shade.openstack_cloud(**module.params)
+
+        changed = process_object(cloud, **module.params)
+
+        module.exit_json(changed=changed)
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=e.message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+main()
diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py
new file mode 100644
index 00000000000..e42b7f938f5
--- /dev/null
+++ b/cloud/openstack/os_security_group.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2013, Benno Joy
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see .
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+
+DOCUMENTATION = '''
+---
+module: os_security_group
+short_description: Add/Delete security groups from an OpenStack cloud.
+extends_documentation_fragment: openstack
+author: "Monty Taylor (@emonty)"
+version_added: "2.0"
+description:
+   - Add or Remove security groups from an OpenStack cloud.
+options:
+   name:
+     description:
+        - Name that has to be given to the security group. This module
+          requires that security group names be unique.
+ required: true + description: + description: + - Long description of the purpose of the security group + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +''' + +EXAMPLES = ''' +# Create a security group +- os_security_group: + cloud=mordred + state=present + name=foo + description=security group for foo servers + +# Update the existing 'foo' security group description +- os_security_group: + cloud=mordred + state=present + name=foo + description=updated description for the foo security group +''' + + +def _needs_update(module, secgroup): + """Check for differences in the updatable values. + + NOTE: We don't currently allow name updates. + """ + if secgroup['description'] != module.params['description']: + return True + return False + + +def _system_state_change(module, secgroup): + state = module.params['state'] + if state == 'present': + if not secgroup: + return True + return _needs_update(module, secgroup) + if state == 'absent' and secgroup: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + description=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + state = module.params['state'] + description = module.params['description'] + + try: + cloud = shade.openstack_cloud(**module.params) + secgroup = cloud.get_security_group(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, secgroup)) + + changed = False + if state == 'present': + if not secgroup: + secgroup = cloud.create_security_group(name, description) + changed = True + else: + if _needs_update(module, secgroup): + secgroup = cloud.update_security_group( + secgroup['id'], description=description) + changed = True + module.exit_json( + changed=changed, id=secgroup['id'], secgroup=secgroup) + + if state == 'absent': + if secgroup: + cloud.delete_security_group(secgroup['id']) + changed = True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py new file mode 100644 index 00000000000..91059aca015 --- /dev/null +++ b/cloud/openstack/os_security_group_rule.py @@ -0,0 +1,325 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
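+#
+# NOTE: an existing rule is matched on the full tuple of protocol,
+# remote_ip_prefix, ethertype, direction and (normalized) port range;
+# see _find_matching_rule() and _ports_match() below.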
+ +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_security_group_rule +short_description: Add/Delete rule from an existing security group +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove rule from an existing security group +options: + security_group: + description: + - Name of the security group + required: true + protocol: + description: + - IP protocol + choices: ['tcp', 'udp', 'icmp', None] + default: None + port_range_min: + description: + - Starting port + required: false + default: None + port_range_max: + description: + - Ending port + required: false + default: None + remote_ip_prefix: + description: + - Source IP address(es) in CIDR notation (exclusive with remote_group) + required: false + remote_group: + description: + - ID of Security group to link (exclusive with remote_ip_prefix) + required: false + ethertype: + description: + - Must be IPv4 or IPv6, and addresses represented in CIDR must + match the ingress or egress rules. Not all providers support IPv6. + choices: ['IPv4', 'IPv6'] + default: IPv4 + direction: + description: + - The direction in which the security group rule is applied. Not + all providers support egress. + choices: ['egress', 'ingress'] + default: ingress + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create a security group rule +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 + +# Create a security group rule for ping +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the ping rule +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +# Create a TCP rule covering all ports +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 1 + port_range_max: 65535 + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the TCP rule above (defaults to all ports) +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 +''' + +RETURN = ''' +id: + description: Unique rule UUID. + type: string +direction: + description: The direction in which the security group rule is applied. + type: string + sample: 'egress' +ethertype: + description: One of IPv4 or IPv6. + type: string + sample: 'IPv4' +port_range_min: + description: The minimum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +port_range_max: + description: The maximum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +protocol: + description: The protocol that is matched by the security group rule. + type: string + sample: 'tcp' +remote_ip_prefix: + description: The remote IP prefix to be associated with this security group rule. + type: string + sample: '0.0.0.0/0' +security_group_id: + description: The security group ID to associate with this security group rule. + type: string +''' + + +def _ports_match(protocol, module_min, module_max, rule_min, rule_max): + """ + Capture the complex port matching logic. 
+ + The port values coming in for the module might be -1 (for ICMP), + which will work only for Nova, but this is handled by shade. Likewise, + they might be None, which works for Neutron, but not Nova. This too is + handled by shade. Since shade will consistently return these port + values as None, we need to convert any -1 values input to the module + to None here for comparison. + + For TCP and UDP protocols, None values for both min and max are + represented as the range 1-65535 for Nova, but remain None for + Neutron. Shade returns the full range when Nova is the backend (since + that is how Nova stores them), and None values for Neutron. If None + values are input to the module for both values, then we need to adjust + for comparison. + """ + + # Check if the user is supplying -1 for ICMP. + if protocol == 'icmp': + if module_min and int(module_min) == -1: + module_min = None + if module_max and int(module_max) == -1: + module_max = None + + # Check if user is supplying None values for full TCP/UDP port range. + if protocol in ['tcp', 'udp'] and module_min is None and module_max is None: + if (rule_min and int(rule_min) == 1 + and rule_max and int(rule_max) == 65535): + # (None, None) == (1, 65535) + return True + + # Sanity check to make sure we don't have type comparison issues. + if module_min: + module_min = int(module_min) + if module_max: + module_max = int(module_max) + if rule_min: + rule_min = int(rule_min) + if rule_max: + rule_max = int(rule_max) + + return module_min == rule_min and module_max == rule_max + + +def _find_matching_rule(module, secgroup): + """ + Find a rule in the group that matches the module parameters. + :returns: The matching rule dict, or None if no matches. + """ + protocol = module.params['protocol'] + remote_ip_prefix = module.params['remote_ip_prefix'] + ethertype = module.params['ethertype'] + direction = module.params['direction'] + + for rule in secgroup['security_group_rules']: + if (protocol == rule['protocol'] + and remote_ip_prefix == rule['remote_ip_prefix'] + and ethertype == rule['ethertype'] + and direction == rule['direction'] + and _ports_match(protocol, + module.params['port_range_min'], + module.params['port_range_max'], + rule['port_range_min'], + rule['port_range_max'])): + return rule + return None + + +def _system_state_change(module, secgroup): + state = module.params['state'] + if secgroup: + rule_exists = _find_matching_rule(module, secgroup) + else: + return False + + if state == 'present' and not rule_exists: + return True + if state == 'absent' and rule_exists: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + security_group = dict(required=True), + # NOTE(Shrews): None is an acceptable protocol value for + # Neutron, but Nova will balk at this. 
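+        # With protocol=None and no ports, Neutron treats the rule as
+        # matching any protocol; _ports_match() above normalizes the
+        # Nova/Neutron port-range differences when comparing rules.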
+ protocol = dict(default=None, + choices=[None, 'tcp', 'udp', 'icmp']), + port_range_min = dict(required=False, type='int'), + port_range_max = dict(required=False, type='int'), + remote_ip_prefix = dict(required=False, default=None), + # TODO(mordred): Make remote_group handle name and id + remote_group = dict(required=False, default=None), + ethertype = dict(default='IPv4', + choices=['IPv4', 'IPv6']), + direction = dict(default='ingress', + choices=['egress', 'ingress']), + state = dict(default='present', + choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['remote_ip_prefix', 'remote_group'], + ] + ) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + security_group = module.params['security_group'] + changed = False + + try: + cloud = shade.openstack_cloud(**module.params) + secgroup = cloud.get_security_group(security_group) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, secgroup)) + + if state == 'present': + if not secgroup: + module.fail_json(msg='Could not find security group %s' % + security_group) + + rule = _find_matching_rule(module, secgroup) + if not rule: + rule = cloud.create_security_group_rule( + secgroup['id'], + port_range_min=module.params['port_range_min'], + port_range_max=module.params['port_range_max'], + protocol=module.params['protocol'], + remote_ip_prefix=module.params['remote_ip_prefix'], + remote_group_id=module.params['remote_group'], + direction=module.params['direction'], + ethertype=module.params['ethertype'] + ) + changed = True + module.exit_json(changed=changed, rule=rule, id=rule['id']) + + if state == 'absent' and secgroup: + rule = _find_matching_rule(module, secgroup) + if rule: + cloud.delete_security_group_rule(rule['id']) + changed = True + + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index a14c738be4a..959f39880f8 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -33,6 +33,7 @@ module: os_server short_description: Create/Delete Compute Instances from OpenStack extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Create or Remove compute instances from OpenStack. 
options: @@ -89,6 +90,11 @@ options: - Ensure instance has public ip however the cloud wants to do that required: false default: 'yes' + auto_floating_ip: + description: + - If the module should automatically assign a floating IP + required: false + default: 'yes' floating_ips: description: - list of valid floating IPs that pre-exist to assign to this node @@ -240,7 +246,8 @@ EXAMPLES = ''' def _exit_hostvars(module, cloud, server, changed=True): hostvars = meta.get_hostvars_from_server(cloud, server) - module.exit_json(changed=changed, id=server.id, openstack=hostvars) + module.exit_json( + changed=changed, server=server, id=server.id, openstack=hostvars) def _network_args(module, cloud): diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py new file mode 100644 index 00000000000..0cfc5bf47cc --- /dev/null +++ b/cloud/openstack/os_server_actions.py @@ -0,0 +1,193 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# Copyright (c) 2015, Jesse Keating +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see <http://www.gnu.org/licenses/>. + + +try: + import shade + from shade import meta + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_server_actions +short_description: Perform actions on Compute Instances from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +author: "Jesse Keating (@j2sol)" +description: + - Perform server actions on an existing compute instance from OpenStack. + This module does not return any data other than changed true/false. +options: + server: + description: + - Name or ID of the instance + required: true + wait: + description: + - If the module should wait for the instance action to be performed. + required: false + default: 'yes' + timeout: + description: + - The amount of time the module should wait for the instance to perform + the requested action. + required: false + default: 180 + action: + description: + - Perform the given action. The lock and unlock actions always return + changed as the servers API does not provide lock status.
+ choices: [pause, unpause, lock, unlock, suspend, resume] + required: true +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Pauses a compute instance +- os_server_actions: + action: pause + auth: + auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0 + username: admin + password: admin + project_name: admin + server: vm1 + timeout: 200 +''' + +_action_map = {'pause': 'PAUSED', + 'unpause': 'ACTIVE', + 'lock': 'ACTIVE', # API doesn't show lock/unlock status + 'unlock': 'ACTIVE', + 'suspend': 'SUSPENDED', + 'resume': 'ACTIVE',} + +_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock'] + +def _wait(timeout, cloud, server, action, module): + """Wait for the server to reach the desired state for the given action.""" + + # module is passed in explicitly; it is local to main(), not a global. + for count in shade._iterate_timeout( + timeout, + "Timeout waiting for server to complete %s" % action): + try: + server = cloud.get_server(server.id) + except Exception: + continue + + if server.status == _action_map[action]: + return + + if server.status == 'ERROR': + module.fail_json(msg="Server reached ERROR state while attempting to %s" % action) + +def _system_state_change(action, status): + """Check if system state would change.""" + if status == _action_map[action]: + return False + return True + +def main(): + argument_spec = openstack_full_argument_spec( + server=dict(required=True), + action=dict(required=True, choices=['pause', 'unpause', 'lock', 'unlock', 'suspend', + 'resume']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + action = module.params['action'] + wait = module.params['wait'] + timeout = module.params['timeout'] + + try: + if action in _admin_actions: + cloud = shade.operator_cloud(**module.params) + else: + cloud = shade.openstack_cloud(**module.params) + server = cloud.get_server(module.params['server']) + if not server: + module.fail_json(msg='Could not find server %s' % module.params['server']) + status = server.status + + if module.check_mode: + module.exit_json(changed=_system_state_change(action, status)) + + if action == 'pause': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.pause(server=server.id) + if wait: + _wait(timeout, cloud, server, action, module) + module.exit_json(changed=True) + + elif action == 'unpause': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.unpause(server=server.id) + if wait: + _wait(timeout, cloud, server, action, module) + module.exit_json(changed=True) + + elif action == 'lock': + # lock doesn't set a state, just do it + cloud.nova_client.servers.lock(server=server.id) + module.exit_json(changed=True) + + elif action == 'unlock': + # unlock doesn't set a state, just do it + cloud.nova_client.servers.unlock(server=server.id) + module.exit_json(changed=True) + + elif action == 'suspend': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.suspend(server=server.id) + if wait: + _wait(timeout, cloud, server, action, module) + module.exit_json(changed=True) + + elif action == 'resume': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.resume(server=server.id) + if wait: + _wait(timeout, cloud, server, action, module) + module.exit_json(changed=True) + + except shade.OpenStackCloudException as e: +
module.fail_json(msg=e.message, extra_data=e.extra_data) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py index fee14c7456c..5d61e4c18d3 100644 --- a/cloud/openstack/os_server_facts.py +++ b/cloud/openstack/os_server_facts.py @@ -27,6 +27,7 @@ DOCUMENTATION = ''' module: os_server_facts short_description: Retrieve facts about a compute instance version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Retrieve facts about a server instance from OpenStack. notes: diff --git a/cloud/openstack/os_server_volume.py b/cloud/openstack/os_server_volume.py index 47e1f433853..945a0ce8bf9 100644 --- a/cloud/openstack/os_server_volume.py +++ b/cloud/openstack/os_server_volume.py @@ -31,6 +31,7 @@ module: os_server_volume short_description: Attach/Detach Volumes from OpenStack VMs extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Attach or Detach volumes from OpenStack VMs options: diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 75bf7b33313..b62eb10b0cc 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -29,6 +29,7 @@ module: os_subnet short_description: Add/Remove subnet to an OpenStack network extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add or Remove a subnet to an OpenStack network options: @@ -91,6 +92,18 @@ options: - A list of host route dictionaries for the subnet. required: false default: None + ipv6_ra_mode: + description: + - IPv6 router advertisement mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None + ipv6_address_mode: + description: + - IPv6 address mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None requirements: - "python >= 2.6" - "shade" @@ -116,6 +129,19 @@ EXAMPLES = ''' - os_subnet: state=absent name=net1subnet + +# Create an ipv6 stateless subnet +- os_subnet: + state: present + name: intv6 + network_name: internal + ip_version: 6 + cidr: 2db8:1::/64 + dns_nameservers: + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + ipv6_ra_mode: dhcpv6-stateless + ipv6_address_mode: dhcpv6-stateless ''' @@ -162,6 +188,7 @@ def _system_state_change(module, subnet): def main(): + ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] argument_spec = openstack_full_argument_spec( name=dict(required=True), network_name=dict(default=None), @@ -173,6 +200,9 @@ def main(): allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), host_routes=dict(default=None, type='list'), + ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices), + ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices), + state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() @@ -194,6 +224,8 @@ def main(): pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] host_routes = module.params['host_routes'] + ipv6_ra_mode = module.params['ipv6_ra_mode'] + ipv6_a_mode = module.params['ipv6_address_mode'] # Check for required parameters when state == 'present' if state == 'present': @@ -224,8 +256,10 @@ def main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, -
host_routes=host_routes) - module.exit_json(changed=True, result="created") + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) + changed = True else: if _needs_update(subnet, module): cloud.update_subnet(subnet['id'], @@ -234,17 +268,21 @@ def main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes) - module.exit_json(changed=True, result="updated") + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) + changed = True else: - module.exit_json(changed=False, result="success") + changed = False + module.exit_json(changed=changed) elif state == 'absent': if not subnet: - module.exit_json(changed=False, result="success") + changed = False else: + changed = True cloud.delete_subnet(subnet_name) - module.exit_json(changed=True, result="deleted") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py index d5baffb96c6..80ad2adcbb2 100644 --- a/cloud/openstack/os_volume.py +++ b/cloud/openstack/os_volume.py @@ -29,6 +29,7 @@ module: os_volume short_description: Create/Delete Cinder Volumes extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Create or Remove cinder block storage volumes options: @@ -89,7 +90,7 @@ EXAMPLES = ''' def _present_volume(module, cloud): if cloud.volume_exists(module.params['display_name']): v = cloud.get_volume(module.params['display_name']) - module.exit_json(changed=False, id=v['id']) + module.exit_json(changed=False, id=v['id'], volume=v) volume_args = dict( size=module.params['size'], @@ -106,7 +107,7 @@ volume = cloud.create_volume( wait=module.params['wait'], timeout=module.params['timeout'], **volume_args) - module.exit_json(changed=True, id=volume['id']) + module.exit_json(changed=True, id=volume['id'], volume=volume) def _absent_volume(module, cloud): @@ -116,8 +117,8 @@ def _absent_volume(module, cloud): wait=module.params['wait'], timeout=module.params['timeout']) except shade.OpenStackCloudTimeout: - module.exit_json(changed=False, result="Volume deletion timed-out") - module.exit_json(changed=True, result='Volume Deleted') + module.exit_json(changed=False) + module.exit_json(changed=True) def main(): diff --git a/cloud/openstack/quantum_router.py b/cloud/openstack/quantum_router.py index 9588fc0951e..ba94773bbe4 100644 --- a/cloud/openstack/quantum_router.py +++ b/cloud/openstack/quantum_router.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: Create or Remove router from openstack description: - Create or Delete routers from OpenStack diff --git a/cloud/openstack/quantum_router_gateway.py b/cloud/openstack/quantum_router_gateway.py index 6e8047c8e8d..48248662ed7 100644 --- a/cloud/openstack/quantum_router_gateway.py +++ b/cloud/openstack/quantum_router_gateway.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router_gateway version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: set/unset a gateway interface for the router with the specified external network description: - Creates/Removes a gateway interface from the router, used to associate an external network with a router to route external traffic.
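(Editor's aside, not part of the patch: the os_subnet hunks above collapse the per-branch module.exit_json(..., result=...) calls into one computed changed flag with a single exit. A minimal sketch of that pattern, assuming shade's get_subnet/create_subnet/update_subnet/delete_subnet calls; ensure_subnet, subnet_kwargs, and needs_update are illustrative names, with needs_update standing in for the module's _needs_update helper.)

def ensure_subnet(module, cloud, subnet_kwargs, needs_update):
    # Hedged sketch of the single-exit idempotency pattern used above.
    state = module.params['state']
    subnet = cloud.get_subnet(module.params['name'])  # None when absent

    changed = False
    if state == 'present':
        if not subnet:
            cloud.create_subnet(**subnet_kwargs)
            changed = True
        elif needs_update(subnet, module):
            cloud.update_subnet(subnet['id'], **subnet_kwargs)
            changed = True
    elif state == 'absent' and subnet:
        cloud.delete_subnet(module.params['name'])
        changed = True

    module.exit_json(changed=changed)

(Check mode falls out of the same shape: report the would-be changed value before mutating anything, as _system_state_change does in these modules.)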
diff --git a/cloud/openstack/quantum_router_interface.py b/cloud/openstack/quantum_router_interface.py index 7d42ec6ff1d..7374b542390 100644 --- a/cloud/openstack/quantum_router_interface.py +++ b/cloud/openstack/quantum_router_interface.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router_interface version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: Attach/Detach a subnet's interface to a router description: - Attach/Detach a subnet interface to a router, to provide a gateway for the subnet. diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 874274c22f3..dad2e2a4f8d 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -182,7 +182,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Jesse Keating, Matt Martz +author: + - "Jesse Keating (@j2sol)" + - "Matt Martz (@sivel)" notes: - I(exact_count) can be "destructive" if the number of running servers in the I(group) is larger than that specified in I(count). In such a case, the diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index 6f922f0128e..ac4e8de424f 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -79,7 +79,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index 870b8e611df..d762b797457 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -58,7 +58,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_cdb.py b/cloud/rackspace/rax_cdb.py index 55e486f79e5..6abadd2ebf4 100644 --- a/cloud/rackspace/rax_cdb.py +++ b/cloud/rackspace/rax_cdb.py @@ -52,7 +52,7 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py index cc7885ee31e..bfd5dbbf128 100644 --- a/cloud/rackspace/rax_cdb_database.py +++ b/cloud/rackspace/rax_cdb_database.py @@ -44,7 +44,7 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_cdb_user.py b/cloud/rackspace/rax_cdb_user.py index a0958084c92..e5169def8df 100644 --- a/cloud/rackspace/rax_cdb_user.py +++ b/cloud/rackspace/rax_cdb_user.py @@ -51,7 +51,7 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index a3deae6f4a7..af46d82e0b4 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -103,7 +103,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H.
Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index 472fad19b1c..d832f5f26eb 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -85,7 +85,7 @@ options: required: false description: - Weight of node -author: Lukasz Kawczynski +author: "Lukasz Kawczynski (@neuroid)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_dns.py b/cloud/rackspace/rax_dns.py index dacc4c672fe..b5dbe40b2e9 100644 --- a/cloud/rackspace/rax_dns.py +++ b/cloud/rackspace/rax_dns.py @@ -48,7 +48,7 @@ notes: - "It is recommended that plays utilizing this module be run with C(serial: 1) to avoid exceeding the API request limit imposed by the Rackspace CloudDNS API" -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py index a28f5b9a9b3..825bb3f21a2 100644 --- a/cloud/rackspace/rax_dns_record.py +++ b/cloud/rackspace/rax_dns_record.py @@ -84,7 +84,7 @@ notes: supplied - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - C(PTR) record support was added in version 1.7 -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py index 68ef446f760..c30df5b9462 100644 --- a/cloud/rackspace/rax_facts.py +++ b/cloud/rackspace/rax_facts.py @@ -35,7 +35,7 @@ options: description: - Server name to retrieve facts for default: null -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_files.py b/cloud/rackspace/rax_files.py index 3c54b0a9e2f..c3f9e8ddec4 100644 --- a/cloud/rackspace/rax_files.py +++ b/cloud/rackspace/rax_files.py @@ -76,7 +76,7 @@ options: web_index: description: - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: Paul Durivage +author: "Paul Durivage (@angstwad)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py index f2510477674..0274a79004d 100644 --- a/cloud/rackspace/rax_files_objects.py +++ b/cloud/rackspace/rax_files_objects.py @@ -92,7 +92,7 @@ options: - file - meta default: file -author: Paul Durivage +author: "Paul Durivage (@angstwad)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index 47b4cb60cf0..a0697742b8e 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -29,7 +29,9 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index 8f38abc12e0..e8cadd968fb 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -39,7 +39,7 @@ options: - present - absent default: present -author: Matt Martz +author: "Matt Martz (@sivel)" notes: - Keypairs cannot be manipulated, only created and deleted. To "update" a keypair you must first delete and then recreate. 
diff --git a/cloud/rackspace/rax_meta.py b/cloud/rackspace/rax_meta.py index 2e1d90f5389..1c6c3bfd234 100644 --- a/cloud/rackspace/rax_meta.py +++ b/cloud/rackspace/rax_meta.py @@ -39,7 +39,7 @@ options: description: - A hash of metadata to associate with the instance default: null -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index bd23f5f878d..cea7531a8db 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -39,7 +39,9 @@ options: description: - cidr of the network being created default: null -author: Christopher H. Laco, Jesse Keating +author: + - "Christopher H. Laco (@claco)" + - "Jesse Keating (@j2sol)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_queue.py b/cloud/rackspace/rax_queue.py index d3e5ac3f81e..8b1a60e4b81 100644 --- a/cloud/rackspace/rax_queue.py +++ b/cloud/rackspace/rax_queue.py @@ -35,7 +35,9 @@ options: - present - absent default: present -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index 64783397016..e6c14fdef0f 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -105,7 +105,7 @@ options: - Data to be uploaded to the servers config drive. This option implies I(config_drive). Can be a file path or a string version_added: 1.8 -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' @@ -263,7 +263,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, lc = sg.get_launch_config() lc_args = {} if server_name != lc.get('name'): - lc_args['name'] = server_name + lc_args['server_name'] = server_name if image != lc.get('image'): lc_args['image'] = image @@ -273,7 +273,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, disk_config = disk_config or 'AUTO' if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config')): + disk_config != lc.get('disk_config', 'AUTO')): lc_args['disk_config'] = disk_config if (meta or lc.get('meta')) and meta != lc.get('metadata'): @@ -299,7 +299,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if key_name != lc.get('key_name'): lc_args['key_name'] = key_name - if config_drive != lc.get('config_drive'): + if config_drive != lc.get('config_drive', False): lc_args['config_drive'] = config_drive if (user_data and diff --git a/cloud/rackspace/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py index b3da82460d8..b216ca13274 100644 --- a/cloud/rackspace/rax_scaling_policy.py +++ b/cloud/rackspace/rax_scaling_policy.py @@ -73,7 +73,7 @@ options: - present - absent default: present -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 62057759172..9ed6ede21c2 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -65,13 +65,13 @@ options: default: null state: description: - - Indicate desired state of the vm. + - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest. 
default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be ran with state) + - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: @@ -79,6 +79,12 @@ options: description: - Name of the source template to deploy from default: None + snapshot_to_clone: + description: + - A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter. + version_added: "2.0" + required: false + default: none vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. @@ -119,7 +125,7 @@ options: notes: - This module should run from a system that can access vSphere directly. Either by using local_action, or using delegate_to. -author: Richard Hoop +author: "Richard Hoop (@rhoop) " requirements: - "python >= 2.6" - pysphere @@ -153,11 +159,18 @@ EXAMPLES = ''' type: vmxnet3 network: VM Network network_type: standard + nic2: + type: vmxnet3 + network: dvSwitch Network + network_type: dvs vm_hardware: memory_mb: 2048 num_cpus: 2 osid: centos64Guest scsi: paravirtual + vm_cdrom: + type: "iso" + iso_path: "DatastoreName/cd-image.iso" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -195,7 +208,6 @@ EXAMPLES = ''' hostname: esx001.mydomain.local # Deploy a guest from a template -# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. - vsphere_guest: vcenter_hostname: vcenter.mydomain.local username: myuser @@ -410,13 +422,21 @@ def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name= def find_datastore(module, s, datastore, config_target): # Verify the datastore exists and put it in brackets if it does. 
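# (Editor's note, hedged, not part of the patch: the hunk below keeps the original config_target lookup and adds a fallback that enumerates datastores directly from the host connection, so deploys against a standalone ESXi host can resolve datastores too.)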
ds = None - for d in config_target.Datastore: - if (d.Datastore.Accessible and - (datastore and d.Datastore.Name == datastore) - or (not datastore)): - ds = d.Datastore.Datastore - datastore = d.Datastore.Name - break + if config_target: + for d in config_target.Datastore: + if (d.Datastore.Accessible and + (datastore and d.Datastore.Name == datastore) + or (not datastore)): + ds = d.Datastore.Datastore + datastore = d.Datastore.Name + break + else: + for ds_mor, ds_name in s.get_datastores().items(): + ds_props = VIProperty(s, ds_mor) + if (ds_props.summary.accessible and (datastore and ds_name == datastore) + or (not datastore)): + ds = ds_mor + datastore = ds_name if not ds: s.disconnect() module.fail_json(msg="Datastore: %s does not appear to exist" % @@ -515,26 +535,78 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None - try: - cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Cluster named: %s" % - cluster_name) + if esxi: + datacenter = esxi['datacenter'] + esxi_hostname = esxi['hostname'] - try: - rpmor = [k for k, v in vsphere_client.get_resource_pools( - from_mor=cluster).items() - if v == resource_pool][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Resource Pool named: %s" % - resource_pool) + # Datacenter managed object reference + dclist = [k for k, + v in vsphere_client.get_datacenters().items() if v == datacenter] + if dclist: + dcmor = dclist[0] + else: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) + + dcprops = VIProperty(vsphere_client, dcmor) + + # hostFolder managed reference + hfmor = dcprops.hostFolder._obj + + # Grab the computeResource name and host properties + crmors = vsphere_client._retrieve_properties_traversal( + property_names=['name', 'host'], + from_node=hfmor, + obj_type='ComputeResource') + + # Grab the host managed object reference of the esxi_hostname + try: + hostmor = [k for k, + v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) + + # Grab the computeResource managed object reference of the host we are + # creating the VM on.
+ crmor = None + for cr in crmors: + if crmor: + break + for p in cr.PropSet: + if p.Name == "host": + for h in p.Val.get_element_ManagedObjectReference(): + if h == hostmor: + crmor = cr.Obj + break + if crmor: + break + crprops = VIProperty(vsphere_client, crmor) + + rpmor = crprops.resourcePool._obj + elif resource_pool: + try: + cluster = [k for k, + v in vsphere_client.get_clusters().items() if v == cluster_name][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Cluster named: %s" % + cluster_name) + + try: + rpmor = [k for k, v in vsphere_client.get_resource_pools( + from_mor=cluster).items() + if v == resource_pool][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Resource Pool named: %s" % + resource_pool) + else: + module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]") try: vmTarget = vsphere_client.get_vm_by_name(guest) @@ -547,9 +619,14 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False + elif snapshot_to_clone is not None: + #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. + vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone) + changed = True else: vmTemplate.clone(guest, resourcepool=rpmor) changed = True + vsphere_client.disconnect() module.exit_json(changed=changed) except Exception as e: @@ -564,13 +641,14 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name changes = {} request = VI.ReconfigVM_TaskRequestMsg() shutdown = False + poweron = vm.is_powered_on() memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled) cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) # Change Memory - if vm_hardware['memory_mb']: + if 'memory_mb' in vm_hardware: if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB: spec = spec_singleton(spec, request, vm) @@ -600,7 +678,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name changes['memory'] = vm_hardware['memory_mb'] # ====( Config Memory )====# - if vm_hardware['num_cpus']: + if 'num_cpus' in vm_hardware: if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU: spec = spec_singleton(spec, request, vm) @@ -654,7 +732,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name module.fail_json( msg="Error reconfiguring vm: %s" % task.get_error_message()) - if vm.is_powered_off(): + if vm.is_powered_off() and poweron: try: vm.power_on(sync_run=True) except Exception, e: @@ -1150,9 +1228,10 @@ def main(): 'reconfigured' ], default='present'), - vmware_guest_facts=dict(required=False, choices=BOOLEANS), - from_template=dict(required=False, choices=BOOLEANS), + vmware_guest_facts=dict(required=False, type='bool'), + from_template=dict(required=False, type='bool'), template_src=dict(required=False, type='str'), + snapshot_to_clone=dict(required=False, default=None, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1161,7 +1240,7 @@ def main(): vm_hw_version=dict(required=False, default=None, type='str'), resource_pool=dict(required=False, default=None, type='str'), cluster=dict(required=False, default=None, type='str'), - force=dict(required=False, 
choices=BOOLEANS, default=False), + force=dict(required=False, type='bool', default=False), esxi=dict(required=False, type='dict', default={}), @@ -1177,8 +1256,7 @@ def main(): 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'], - ['from_template', 'resource_pool', 'template_src'] + ['from_template', 'template_src'], ], ) @@ -1202,6 +1280,8 @@ def main(): cluster = module.params['cluster'] template_src = module.params['template_src'] from_template = module.params['from_template'] + snapshot_to_clone = module.params['snapshot_to_clone'] + # CONNECT TO THE SERVER viserver = VIServer() @@ -1281,7 +1361,8 @@ def main(): guest=guest, template_src=template_src, module=module, - cluster_name=cluster + cluster_name=cluster, + snapshot_to_clone=snapshot_to_clone ) if state in ['restarted', 'reconfigured']: module.fail_json( @@ -1321,6 +1402,6 @@ def main(): # this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/commands/command.py b/commands/command.py index 131fc4c7ffc..dbb23949273 100644 --- a/commands/command.py +++ b/commands/command.py @@ -21,6 +21,7 @@ import copy import sys import datetime +import glob import traceback import re import shlex @@ -47,12 +48,12 @@ options: aliases: [] creates: description: - - a filename, when it already exists, this step will B(not) be run. + - a filename or glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename, when it does not exist, this step will B(not) be run. + - a filename or glob pattern, when it does not exist, this step will B(not) be run. version_added: "0.8" required: no default: null @@ -81,7 +82,9 @@ notes: M(command) module is much more secure as it's not affected by the user's environment. - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this." -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' @@ -154,12 +157,22 @@ def main(): # the command module is the one ansible module that does not take key=value args # hence don't copy this one if you are looking to build others! - module = CommandModule(argument_spec=dict()) + module = AnsibleModule( + argument_spec=dict( + _raw_params = dict(), + _uses_shell = dict(type='bool', default=False), + chdir = dict(), + executable = dict(), + creates = dict(), + removes = dict(), + warn = dict(type='bool', default=True), + ) + ) - shell = module.params['shell'] + shell = module.params['_uses_shell'] chdir = module.params['chdir'] executable = module.params['executable'] - args = module.params['args'] + args = module.params['_raw_params'] creates = module.params['creates'] removes = module.params['removes'] warn = module.params['warn'] @@ -168,6 +181,7 @@ def main(): module.fail_json(rc=256, msg="no command given") if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) os.chdir(chdir) if creates: @@ -175,7 +189,7 @@ def main(): # and the filename already exists. This allows idempotence # of command executions. v = os.path.expanduser(creates) - if os.path.exists(v): + if glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s exists" % v, @@ -189,7 +203,7 @@ def main(): # and the filename does not exist. This allows idempotence # of command executions. 
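# (Editor's illustration, not part of the patch: with the glob support added here, removes=/var/run/myapp/*.pid runs the command only while a matching pid file still exists, and creates=/var/cache/myapp-*.stamp skips it once any match appears; both paths are hypothetical examples.)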
v = os.path.expanduser(removes) - if not os.path.exists(v): + if not glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s does not exist" % v, @@ -232,48 +246,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.splitter import * -# only the command module should ever need to do this -# everything else should be simple key=value - -class CommandModule(AnsibleModule): - - def _handle_aliases(self): - return {} - - def _check_invalid_arguments(self): - pass - - def _load_params(self): - ''' read the input and return a dictionary and the arguments string ''' - args = MODULE_ARGS - params = copy.copy(OPTIONS) - params['shell'] = False - if "#USE_SHELL" in args: - args = args.replace("#USE_SHELL", "") - params['shell'] = True - - items = split_args(args) - - for x in items: - quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'") - if '=' in x and not quoted: - # check to see if this is a special parameter for the command - k, v = x.split('=', 1) - v = unquote(v.strip()) - if k in OPTIONS.keys(): - if k == "chdir": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v) and os.path.isdir(v)): - self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v) - elif k == "executable": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v)): - self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v) - params[k] = v - # Remove any of the above k=v params from the args string - args = PARAM_REGEX.sub('', args) - params['args'] = args.strip() - - return (params, params['args']) - main() diff --git a/commands/raw.py b/commands/raw.py index 87f2b5c4bdc..5305c978630 100644 --- a/commands/raw.py +++ b/commands/raw.py @@ -34,7 +34,9 @@ notes: playbooks will follow the trend of using M(command) unless M(shell) is explicitly required. When running ad-hoc commands, use your best judgement. -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' diff --git a/commands/script.py b/commands/script.py index 01a1ae34e71..ccf15331a6c 100644 --- a/commands/script.py +++ b/commands/script.py @@ -32,7 +32,9 @@ options: version_added: "1.5" notes: - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points! -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan """ EXAMPLES = ''' diff --git a/commands/shell.py b/commands/shell.py index b63a21080ee..cccc90f05ff 100644 --- a/commands/shell.py +++ b/commands/shell.py @@ -57,7 +57,9 @@ notes: "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons. requirements: [ ] -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 16ddf93e7a5..c018ad143db 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -89,7 +89,7 @@ notes: the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. 
requirements: [ ConfigParser ] -author: Mark Theunissen +author: "Mark Theunissen (@marktheunissen)" ''' EXAMPLES = ''' @@ -111,6 +111,7 @@ import ConfigParser import os import pipes import stat +import subprocess try: import MySQLdb except ImportError: @@ -142,14 +143,20 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port, cmd += " --all-databases" else: cmd += " %s" % pipes.quote(db_name) + + path = None if os.path.splitext(target)[-1] == '.gz': - cmd = cmd + ' | gzip > ' + pipes.quote(target) + path = module.get_bin_path('gzip', True) elif os.path.splitext(target)[-1] == '.bz2': - cmd = cmd + ' | bzip2 > ' + pipes.quote(target) + path = module.get_bin_path('bzip2', True) elif os.path.splitext(target)[-1] == '.xz': - cmd = cmd + ' | xz > ' + pipes.quote(target) + path = module.get_bin_path('xz', True) + + if path: + cmd = '%s | %s > %s' % (cmd, path, pipes.quote(target)) else: cmd += " > %s" % pipes.quote(target) + rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr @@ -157,69 +164,44 @@ def db_import(module, host, user, password, db_name, target, all_databases, port if not os.path.exists(target): return module.fail_json(msg="target %s does not exist on the host" % target) - cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) + cmd = [module.get_bin_path('mysql', True)] + if user: + cmd.append("--user=%s" % pipes.quote(user)) + if password: + cmd.append("--password=%s" % pipes.quote(password)) if socket is not None: - cmd += " --socket=%s" % pipes.quote(socket) + cmd.append("--socket=%s" % pipes.quote(socket)) else: - cmd += " --host=%s --port=%i" % (pipes.quote(host), port) + cmd.append("--host=%s" % pipes.quote(host)) + cmd.append("--port=%i" % port) if not all_databases: - cmd += " -D %s" % pipes.quote(db_name) + cmd.append("-D") + cmd.append(pipes.quote(db_name)) + + comp_prog_path = None if os.path.splitext(target)[-1] == '.gz': - gzip_path = module.get_bin_path('gzip') - if not gzip_path: - module.fail_json(msg="gzip command not found") - #gzip -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #gzip file back up - module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0])) + comp_prog_path = module.get_bin_path('gzip', required=True) elif os.path.splitext(target)[-1] == '.bz2': - bzip2_path = module.get_bin_path('bzip2') - if not bzip2_path: - module.fail_json(msg="bzip2 command not found") - #bzip2 -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #bzip2 file back up - rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0])) + comp_prog_path = module.get_bin_path('bzip2', required=True) elif os.path.splitext(target)[-1] == '.xz': - xz_path = module.get_bin_path('xz') - if not xz_path: - module.fail_json(msg="xz command not found") - #xz -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % 
(xz_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #xz file back up - rc, stdout, stderr = module.run_command('%s %s' % (xz_path, os.path.splitext(target)[0])) + comp_prog_path = module.get_bin_path('xz', required=True) + + if comp_prog_path: + p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1 + else: + return p2.returncode, stdout2, stderr2 else: + cmd = ' '.join(cmd) cmd += " < %s" % pipes.quote(target) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr + return rc, stdout, stderr def db_create(cursor, db, encoding, collation): query_params = dict(enc=encoding, collate=collation) @@ -344,7 +326,7 @@ def main(): if state in ['dump','import']: if target is None: module.fail_json(msg="with state=%s target is required" % (state)) - if db == 'all': + if db == 'all': connect_to_db = 'mysql' db = 'mysql' all_databases = True @@ -370,11 +352,11 @@ def main(): db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db) cursor = db_connection.cursor() except Exception, e: + errno, errstr = e.args if "Unknown database" in str(e): - errno, errstr = e.args module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) else: - module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running") + module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr)) changed = False if db_exists(cursor, db): diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 54b63eed4d7..7c72546706a 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -90,7 +90,8 @@ options: description: - Check if mysql allows login as root/nopassword before trying supplied credentials. required: false - default: false + choices: [ "yes", "no" ] + default: "no" version_added: "1.3" update_password: required: false @@ -108,7 +109,7 @@ options: notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this is as easy as apt-get install python-mysqldb. - - Both C(login_password) and C(login_username) are required when you are + - Both C(login_password) and C(login_user) are required when you are passing credentials. If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of 'root' with no password. @@ -119,7 +120,7 @@ notes: the file." 
requirements: [ "MySQLdb" ] -author: Mark Theunissen +author: "Mark Theunissen (@marktheunissen)" ''' EXAMPLES = """ @@ -148,8 +149,6 @@ mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL - mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock # Example .my.cnf file for setting the root password -# Note: don't use quotes around the password, because the mysql_user module -# will include them in the password but the mysql client will not [client] user=root @@ -158,6 +157,7 @@ password=n<_665{vS43y import getpass import tempfile +import re try: import MySQLdb except ImportError: @@ -244,7 +244,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): grant_option = True if db_table not in new_priv: if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) + privileges_revoke(cursor, user,host,db_table,priv,grant_option) changed = True # If the user doesn't currently have any privileges on a db.table, then @@ -261,7 +261,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) if (len(priv_diff) > 0): if not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) + privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option) privileges_grant(cursor, user,host,db_table,new_priv[db_table]) changed = True @@ -292,7 +292,7 @@ def privileges_get(cursor, user,host): return x for grant in grants: - res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) + res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) if res is None: raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) privileges = res.group(1).split(", ") @@ -317,17 +317,19 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. """ output = {} + privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') - if '.' 
in pieces[0]: - pieces[0] = pieces[0].split('.') - for idx, piece in enumerate(pieces): - if pieces[0][idx] != "*": - pieces[0][idx] = "`" + pieces[0][idx] + "`" - pieces[0] = '.'.join(pieces[0]) - - output[pieces[0]] = pieces[1].upper().split(',') - new_privs = frozenset(output[pieces[0]]) + dbpriv = pieces[0].rsplit(".", 1) + pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) + if '(' in pieces[1]: + output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) + for i in output[pieces[0]]: + privs.append(re.sub(r'\(.*\)','',i)) + else: + output[pieces[0]] = pieces[1].upper().split(',') + privs = output[pieces[0]] + new_privs = frozenset(privs) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) @@ -341,7 +343,7 @@ def privileges_unpack(priv): return output -def privileges_revoke(cursor, user,host,db_table,grant_option): +def privileges_revoke(cursor, user,host,db_table,priv,grant_option): # Escape '%' since mysql db.execute() uses a format string db_table = db_table.replace('%', '%%') if grant_option: @@ -349,7 +351,8 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query.append("FROM %s@%s") query = ' '.join(query) cursor.execute(query, (user, host)) - query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) + query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("FROM %s@%s") query = ' '.join(query) cursor.execute(query, (user, host)) @@ -358,7 +361,7 @@ def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the # specification of db and table often use a % (SQL wildcard) db_table = db_table.replace('%', '%%') - priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: @@ -381,12 +384,12 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None), + password=dict(default=None, no_log=True), host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), - append_privs=dict(type="bool", default="no"), - check_implicit_admin=dict(default=False), + append_privs=dict(default=False, type='bool'), + check_implicit_admin=dict(default=False, type='bool'), update_password=dict(default="always", choices=["always", "on_create"]), config_file=dict(default="~/.my.cnf"), ) @@ -395,7 +398,7 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - host = module.params["host"] + host = module.params["host"].lower() state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 199c5eb6eca..d7187e85733 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -30,6 +30,7 @@ short_description: Manage MySQL global variables description: - Query / Set MySQL variables version_added: 1.3 +author: "Balazs Pocze (@banyek)" options: 
variable: description: @@ -51,6 +52,11 @@ options: description: - mysql host to connect required: False + login_port: + version_added: "2.0" + description: + - mysql port to connect + required: False login_unix_socket: description: - unix socket to connect mysql server @@ -67,6 +73,7 @@ EXAMPLES = ''' import ConfigParser import os import warnings +from re import match try: import MySQLdb @@ -103,10 +110,12 @@ def typedvalue(value): def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,)) + cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,)) mysqlvar_val = cursor.fetchall() - return mysqlvar_val - + if len(mysqlvar_val) is 1: + return mysqlvar_val[0][1] + else: + return None def setvariable(cursor, mysqlvar, value): """ Set a global mysql variable to a given value @@ -116,11 +125,9 @@ def setvariable(cursor, mysqlvar, value): should be passed as numeric literals. """ - query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ] - query.append(" = %s") - query = ' '.join(query) + query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars') try: - cursor.execute(query, (value,)) + cursor.execute(query + "%s", (value,)) cursor.fetchall() result = True except Exception, e: @@ -192,7 +199,8 @@ def main(): argument_spec = dict( login_user=dict(default=None), login_password=dict(default=None), - login_host=dict(default="localhost"), + login_host=dict(default="127.0.0.1"), + login_port=dict(default="3306", type='int'), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None) @@ -202,8 +210,13 @@ def main(): user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] + port = module.params["login_port"] mysqlvar = module.params["variable"] value = module.params["value"] + if mysqlvar is None: + module.fail_json(msg="Cannot run without variable to operate with") + if match('^[0-9a-z_]+$', mysqlvar) is None: + module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: @@ -226,23 +239,21 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - if mysqlvar is None: - module.fail_json(msg="Cannot run without variable to operate with") mysqlvar_val = getvariable(cursor, mysqlvar) + if mysqlvar_val is None: + module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) if value is None: module.exit_json(msg=mysqlvar_val) else: - if len(mysqlvar_val) < 1: - module.fail_json(msg="Variable not available", 
changed=False) # Type values before using them value_wanted = typedvalue(value) - value_actual = typedvalue(mysqlvar_val[0][1]) + value_actual = typedvalue(mysqlvar_val) if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) try: diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 4ce8e146ccd..469d68fa0fa 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -95,7 +95,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. requirements: [ psycopg2 ] -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 22a565f6b65..8fefd3de648 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -136,7 +136,7 @@ notes: another user also, R can still access database objects via these privileges. - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). requirements: [psycopg2] -author: Bernhard Weitzhofer +author: "Bernhard Weitzhofer (@b6d)" """ EXAMPLES = """ @@ -315,7 +315,7 @@ class Connection(object): query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" + WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 7dda85f343c..cee5a9ae131 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -92,7 +92,7 @@ options: description: - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER" required: false - default: null + default: "" choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] state: @@ -137,7 +137,7 @@ notes: to all users. You may not specify password or role_attr_flags when the PUBLIC user is specified. requirements: [ psycopg2 ] -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' @@ -233,7 +233,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir return False # Handle passwords. - if not no_password_changes and (password is not None or role_attr_flags is not None): + if not no_password_changes and (password is not None or role_attr_flags != ''): # Select password and all flag-like columns in order to verify changes. 
query_password_data = dict(password=password, expires=expires) select = "SELECT * FROM pg_authid where rolname=%(user)s" @@ -263,7 +263,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir role_attr_flags_changing = False if role_attr_flags: role_attr_flags_dict = {} - for r in role_attr_flags.split(','): + for r in role_attr_flags.split(' '): if r.startswith('NO'): role_attr_flags_dict[r.replace('NO', '', 1)] = False else: @@ -324,12 +324,21 @@ def user_delete(cursor, user): cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") return True -def has_table_privilege(cursor, user, table, priv): - if priv == 'ALL': - priv = ','.join([ p for p in VALID_PRIVS['table'] if p != 'ALL' ]) - query = 'SELECT has_table_privilege(%s, %s, %s)' - cursor.execute(query, (user, table, priv)) - return cursor.fetchone()[0] +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. + + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) def get_table_privileges(cursor, user, table): if '.' in table: @@ -339,26 +348,21 @@ def get_table_privileges(cursor, user, table): query = '''SELECT privilege_type FROM information_schema.role_table_grants WHERE grantee=%s AND table_name=%s AND table_schema=%s''' cursor.execute(query, (user, table, schema)) - return set([x[0] for x in cursor.fetchall()]) + return frozenset([x[0] for x in cursor.fetchall()]) -def grant_table_privilege(cursor, user, table, priv): +def grant_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs - prev_priv = get_table_privileges(cursor, user, table) + privs = ', '.join(privs) query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) + privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) > len(prev_priv) -def revoke_table_privilege(cursor, user, table, priv): +def revoke_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs - prev_priv = get_table_privileges(cursor, user, table) + privs = ', '.join(privs) query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) + privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) < len(prev_priv) - def get_database_privileges(cursor, user, db): priv_map = { @@ -370,80 +374,92 @@ def get_database_privileges(cursor, user, db): cursor.execute(query, (db,)) datacl = cursor.fetchone()[0] if datacl is None: - return [] + return set() r = re.search('%s=(C?T?c?)/[a-z]+\,?' 
% user, datacl) if r is None: - return [] - o = [] + return set() + o = set() for v in r.group(1): - o.append(priv_map[v]) - return o + o.add(priv_map[v]) + return normalize_privileges(o, 'database') -def has_database_privilege(cursor, user, db, priv): - if priv == 'ALL': - priv = ','.join([ p for p in VALID_PRIVS['database'] if p != 'ALL' ]) - query = 'SELECT has_database_privilege(%s, %s, %s)' - cursor.execute(query, (user, db, priv)) - return cursor.fetchone()[0] +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. -def grant_database_privilege(cursor, user, db, priv): + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) + +def grant_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs - prev_priv = get_database_privileges(cursor, user, db) + privs =', '.join(privs) if user == "PUBLIC": query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( - priv, pg_quote_identifier(db, 'database')) + privs, pg_quote_identifier(db, 'database')) else: query = 'GRANT %s ON DATABASE %s TO %s' % ( - priv, pg_quote_identifier(db, 'database'), + privs, pg_quote_identifier(db, 'database'), pg_quote_identifier(user, 'role')) cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) > len(prev_priv) -def revoke_database_privilege(cursor, user, db, priv): +def revoke_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs - prev_priv = get_database_privileges(cursor, user, db) + privs = ', '.join(privs) if user == "PUBLIC": query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( - priv, pg_quote_identifier(db, 'database')) + privs, pg_quote_identifier(db, 'database')) else: query = 'REVOKE %s ON DATABASE %s FROM %s' % ( - priv, pg_quote_identifier(db, 'database'), + privs, pg_quote_identifier(db, 'database'), pg_quote_identifier(user, 'role')) cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) < len(prev_priv) def revoke_privileges(cursor, user, privs): if privs is None: return False + revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, database=has_database_privileges) + changed = False - revoke_funcs = dict(table=revoke_table_privilege, database=revoke_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) for type_ in privs: for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - if check_funcs[type_](cursor, user, name, privilege): - changed = revoke_funcs[type_](cursor, user, name, privilege)\ - or changed - + # Check that any of the privileges requested to be removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True return changed def grant_privileges(cursor, user, privs): if privs is None: return False - grant_funcs = dict(table=grant_table_privilege, 
database=grant_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) + + grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, database=has_database_privileges) changed = False for type_ in privs: for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - if not check_funcs[type_](cursor, user, name, privilege): - changed = grant_funcs[type_](cursor, user, name, privilege)\ - or changed - + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True return changed def parse_role_attrs(role_attr_flags): @@ -472,6 +488,17 @@ def parse_role_attrs(role_attr_flags): o_flags = ' '.join(flag_set) return o_flags +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in new_privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in new_privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + def parse_privs(privs, db): """ Parse privilege string to determine permissions for database db. @@ -504,6 +531,8 @@ def parse_privs(privs, db): if not priv_set.issubset(VALID_PRIVS[type_]): raise InvalidPrivsError('Invalid privs specified for %s: %s' % (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) o_privs[type_][name] = priv_set return o_privs diff --git a/files/acl.py b/files/acl.py index 0c568ba59a5..0c924fee94c 100644 --- a/files/acl.py +++ b/files/acl.py @@ -79,7 +79,7 @@ options: description: - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. -author: Brian Coca +author: "Brian Coca (@bcoca)" notes: - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. ''' diff --git a/files/assemble.py b/files/assemble.py index a66c82f432a..ad73c7b4354 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -79,7 +79,13 @@ options: U(http://docs.python.org/2/library/re.html). required: false default: null -author: Stephen Fromm + ignore_hidden: + description: + - A boolean that controls if files that start with a '.' will be included or not.
+ required: false + default: false + version_added: "2.0" +author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: files ''' @@ -94,7 +100,7 @@ EXAMPLES = ''' # =========================================== # Support method -def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): +def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') @@ -105,7 +111,7 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): + if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(fragment).read() @@ -148,6 +154,7 @@ def main(): backup=dict(default=False, type='bool'), remote_src=dict(default=False, type='bool'), regexp = dict(required=False), + ignore_hidden = dict(default=False, type='bool'), ), add_file_common_args=True ) @@ -162,6 +169,7 @@ def main(): delimiter = module.params['delimiter'] regexp = module.params['regexp'] compiled_regexp = None + ignore_hidden = module.params['ignore_hidden'] if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) @@ -175,7 +183,7 @@ def main(): except re.error, e: module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) - path = assemble_from_fragments(src, delimiter, compiled_regexp) + path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden) path_hash = module.sha1(path) if os.path.exists(dest): diff --git a/files/copy.py b/files/copy.py index e80ed805539..ad56800764b 100644 --- a/files/copy.py +++ b/files/copy.py @@ -63,21 +63,13 @@ options: force: description: - the default is C(yes), which will replace the remote file when contents - are different than the source. If C(no), the file will only be transferred + are different than the source. If C(no), the file will only be transferred if the destination does not exist. version_added: "1.1" required: false choices: [ "yes", "no" ] default: "yes" aliases: [ "thirsty" ] - validate: - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the visudo example below. - The command is passed securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" directory_mode: description: - When doing a recursive copy set the mode for the directories. If this is not set we will use the system @@ -86,7 +78,10 @@ options: required: false version_added: "1.5" extends_documentation_fragment: files -author: Michael DeHaan +extends_documentation_fragment: validate +author: + - "Ansible Core Team" + - "Michael DeHaan" notes: - The "copy" module recursively copy facility does not scale to lots (>hundreds) of files. For alternative, see synchronize module, which is a wrapper around rsync. diff --git a/files/fetch.py b/files/fetch.py index 37ead3a7352..b8234374976 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -50,7 +50,9 @@ options: will use the basename of the source file, similar to the copy module. Obviously this is only handy if the filenames are unique. 
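Stepping back to the postgresql_user.py rewrite above: has_table_privileges and has_database_privileges now return a three-way set difference, and the grant/revoke paths fire a single batched statement only when the relevant slot is non-empty. A self-contained sketch of that arithmetic together with the ALL/TEMP normalization it depends on (VALID_PRIVS is trimmed to the database privileges implied by priv_map above; the data is invented):

```python
VALID_PRIVS = dict(database=frozenset(['CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL']))

def normalize_privileges(privs, type_):
    # Expand ALL and canonicalize TEMP so set comparisons are stable.
    new_privs = set(privs)
    if 'ALL' in new_privs:
        new_privs.update(VALID_PRIVS[type_])
        new_privs.remove('ALL')
    if 'TEMP' in new_privs:
        new_privs.add('TEMPORARY')
        new_privs.remove('TEMP')
    return new_privs

def diff_privileges(cur_privs, requested):
    # The three-tuple shape returned by has_*_privileges above.
    cur_privs, requested = frozenset(cur_privs), frozenset(requested)
    return (cur_privs & requested,   # held and requested
            cur_privs - requested,   # held but not requested
            requested - cur_privs)   # requested but missing

requested = normalize_privileges(set(['ALL']), 'database')
have, other, missing = diff_privileges(set(['CONNECT']), requested)
print(sorted(missing))  # ['CREATE', 'TEMPORARY'] -> one GRANT covers both
```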
requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/files/file.py b/files/file.py index 8da87b0707e..c3267f7f18b 100644 --- a/files/file.py +++ b/files/file.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import errno import shutil import stat import grp @@ -34,21 +35,23 @@ module: file version_added: "historical" short_description: Sets attributes of files extends_documentation_fragment: files -description: +description: - Sets attributes of files, symlinks, and directories, or removes files/symlinks/directories. Many other modules support the same options as the M(file) module - including M(copy), M(template), and M(assemble). notes: - See also M(copy), M(template), M(assemble) requirements: [ ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" options: path: description: - 'path to the file being managed. Aliases: I(dest), I(name)' required: true default: [] - aliases: ['dest', 'name'] + aliases: ['dest', 'name'] state: description: - If C(directory), all immediate subdirectories will be created if they @@ -66,7 +69,6 @@ options: src: required: false default: null - choices: [] description: - path of the file to link to (applies only to C(state=link)). Will accept absolute, relative and nonexisting paths. Relative paths are not expanded. @@ -82,7 +84,7 @@ options: default: "no" choices: [ "yes", "no" ] description: - - 'force the creation of the symlinks in two cases: the source file does + - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' ''' @@ -102,6 +104,9 @@ EXAMPLES = ''' # touch the same file, but add/remove some permissions - file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx" +# create a directory if it doesn't exist +- file: path=/etc/some_directory state=directory mode=0755 + ''' @@ -150,8 +155,8 @@ def main(): state = dict(choices=['file','directory','link','hard','touch','absent'], default=None), path = dict(aliases=['dest', 'name'], required=True), original_basename = dict(required=False), # Internal use only, for recursive ops - recurse = dict(default='no', type='bool'), - force = dict(required=False,default=False,type='bool'), + recurse = dict(default=False, type='bool'), + force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), validate = dict(required=False, default=None), src = dict(required=False, default=None), @@ -266,20 +271,30 @@ def main(): module.exit_json(changed=True) changed = True curpath = '' - # Split the path so we can apply filesystem attributes recursively - # from the root (/) directory for absolute paths or the base path - # of a relative path. We can then walk the appropriate directory - # path to apply attributes. 
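The files/file.py hunk that follows replaces a bare os.mkdir() walk with one that tolerates a directory appearing between the exists() check and the mkdir() call, i.e. the usual mkdir race. The guard in isolation, assuming the standard errno.EEXIST spelling (except-as syntax used here for portability):

```python
import errno
import os

def mkdir_tolerant(curpath):
    # Another process may create the directory after os.path.exists()
    # returns False; that is fine as long as the path is a directory.
    if not os.path.exists(curpath):
        try:
            os.mkdir(curpath)
        except OSError as ex:
            if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)):
                raise
```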
- for dirname in path.strip('/').split('/'): - curpath = '/'.join([curpath, dirname]) - # Remove leading slash if we're creating a relative path - if not os.path.isabs(path): - curpath = curpath.lstrip('/') - if not os.path.exists(curpath): - os.mkdir(curpath) - tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + + try: + # Split the path so we can apply filesystem attributes recursively + # from the root (/) directory for absolute paths or the base path + # of a relative path. We can then walk the appropriate directory + # path to apply attributes. + for dirname in path.strip('/').split('/'): + curpath = '/'.join([curpath, dirname]) + # Remove leading slash if we're creating a relative path + if not os.path.isabs(path): + curpath = curpath.lstrip('/') + if not os.path.exists(curpath): + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)): + raise + tmp_file_args = file_args.copy() + tmp_file_args['path']=curpath + changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + except Exception, e: + module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e))) # We already know prev_state is not 'absent', therefore it exists in some form. elif prev_state != 'directory': diff --git a/files/find.py b/files/find.py index d5441aad273..659ec16026e 100644 --- a/files/find.py +++ b/files/find.py @@ -53,6 +53,11 @@ options: - One or more (shell type) file glob patterns, which restrict the list of files to be returned to those whose basenames match at least one of the patterns specified. Multiple patterns can be specified using a list. + contains: + required: false + default: null + description: + - One or more re patterns which should be matched against the file content paths: required: true aliases: [ "name" ] @@ -96,7 +101,7 @@ options: default: "False" choices: [ True, False ] description: - - Set this to true to follow symlinks in path.
+ - Set this to true to follow symlinks in path for systems with python 2.6+ get_checksum: required: false default: "False" @@ -177,6 +182,23 @@ def sizefilter(st, size): return False +def contentfilter(fsname, pattern): + '''filter files which contain the given expression''' + if pattern is None: return True + + try: + f = open(fsname) + prog = re.compile(pattern) + for line in f: + if prog.match (line): + f.close() + return True + + f.close() + except: + pass + + return False def statinfo(st): return { @@ -216,6 +238,7 @@ def main(): argument_spec = dict( paths = dict(required=True, aliases=['name'], type='list'), patterns = dict(default=['*'], type='list'), + contains = dict(default=None, type='str'), file_type = dict(default="file", choices=['file', 'directory'], type='str'), age = dict(default=None, type='str'), age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'), @@ -258,8 +281,10 @@ def main(): looked = 0 for npath in params['paths']: if os.path.isdir(npath): - for root,dirs,files in os.walk( npath, followlinks=params['follow'] ): + ''' ignore followlinks for python version < 2.6 ''' + for root,dirs,files in (sys.version_info < (2,6,0) and os.walk(npath)) or \ + os.walk( npath, followlinks=params['follow']): looked = looked + len(files) + len(dirs) for fsobj in (files + dirs): fsname=os.path.normpath(os.path.join(root, fsobj)) @@ -278,7 +303,8 @@ def main(): elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file': if pfilter(fsobj, params['patterns']) and \ agefilter(st, now, age, params['age_stamp']) and \ - sizefilter(st, size): + sizefilter(st, size) and \ + contentfilter(fsname, params['contains']): r.update(statinfo(st)) if params['get_checksum']: diff --git a/files/ini_file.py b/files/ini_file.py index e247c265fc8..fff153af6ad 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -73,7 +73,7 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. requirements: [ ConfigParser ] -author: Jan-Piet Mens +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES = ''' @@ -120,6 +120,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if cp.get(section, option): cp.remove_option(section, option) changed = True + except ConfigParser.InterpolationError: + cp.remove_option(section, option) + changed = True except: pass @@ -143,6 +146,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese except ConfigParser.NoOptionError: cp.set(section, option, value) changed = True + except ConfigParser.InterpolationError: + cp.set(section, option, value) + changed = True if changed and not module.check_mode: if backup: diff --git a/files/lineinfile.py b/files/lineinfile.py index fd589b03e8d..777f0a498a9 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -22,14 +22,16 @@ import re import os import pipes -import codecs import tempfile DOCUMENTATION = """ --- module: lineinfile -author: Daniel Hokka Zakrisson, Ahti Kitsik +author: + - "Daniel Hokka Zakrissoni (@dhozac)" + - "Ahti Kitsik (@ahtik)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: @@ -115,16 +117,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. 
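On the contentfilter helper added to files/find.py above: it returns at the first matching line, so large files are only read as far as needed, and any failure to read is treated as a non-match. An equivalent sketch that narrows the bare except and guarantees the handle is closed (re is the only dependency):

```python
import re

def content_matches(path, pattern):
    """Return True if any line of the file matches the regex pattern."""
    if pattern is None:
        return True
    try:
        prog = re.compile(pattern)
        with open(path) as f:
            for line in f:
                if prog.match(line):   # anchored at line start, like the module
                    return True
    except (IOError, OSError, re.error):
        return False   # unreadable file or bad pattern counts as "no match"
    return False
```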
- validate: - required: false - description: - - validation to run before copying into place. - Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like - expansion and pipes won't work. - required: false - default: None - version_added: "1.4" others: description: - All arguments accepted by the M(file) module also work here. @@ -244,8 +236,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, # Don't do backref expansion if not asked. new_line = line - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep + if not new_line.endswith(os.linesep): + new_line += os.linesep + + if lines[index[0]] != new_line: + lines[index[0]] = new_line msg = 'line replaced' changed = True elif backrefs: @@ -370,14 +365,6 @@ def main(): line = params['line'] - # Replace escape sequences like '\n' while being sure - # not to replace octal escape sequences (\ooo) since they - # match the backref syntax. - if backrefs: - line = re.sub(r'(\\[0-9]{1,3})', r'\\\1', line) - - line = codecs.escape_decode(line)[0] - present(module, dest, params['regexp'], line, ins_aft, ins_bef, create, backup, backrefs) else: diff --git a/files/replace.py b/files/replace.py index 588af02391e..dea2c32a54f 100644 --- a/files/replace.py +++ b/files/replace.py @@ -25,8 +25,9 @@ import tempfile DOCUMENTATION = """ --- module: replace -author: Evan Kaufman +author: "Evan Kaufman (@EvanK)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. description: @@ -61,12 +62,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place - required: false - default: None others: description: - All arguments accepted by the M(file) module also work here. @@ -160,6 +155,7 @@ def main(): module.exit_json(changed=changed, msg=msg) # this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/files/stat.py b/files/stat.py index ee3998f5f75..2e088fc8dbd 100644 --- a/files/stat.py +++ b/files/stat.py @@ -47,7 +47,7 @@ options: default: yes aliases: [] version_added: "1.8" -author: Bruce Pennypacker +author: "Bruce Pennypacker (@bpennypacker)" ''' EXAMPLES = ''' @@ -58,6 +58,23 @@ EXAMPLES = ''' - fail: msg="Whoops! file ownership has changed" when: st.stat.pw_name != 'root' +# Determine if a path exists and is a symlink. Note that if the path does +# not exist, and we test sym.stat.islnk, it will fail with an error. So +# therefore, we must test whether it is defined. +# Run this to understand the structure, the skipped ones do not pass the +# check performed by 'when' +- stat: path=/path/to/something + register: sym +- debug: msg="islnk isn't defined (path doesn't exist)" + when: sym.stat.islnk is not defined +- debug: msg="islnk is defined (path must exist)" + when: sym.stat.islnk is defined +- debug: msg="Path exists and is a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk +- debug: msg="Path exists and isn't a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk == False + + # Determine if a path exists and is a directory. 
Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. - stat: path=/path/to/something @@ -233,13 +250,13 @@ stat: md5: description: md5 hash of the path returned: success, path exists and user can read stats and path supports hashing and md5 is supported - type: boolean - sample: True + type: string + sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0 checksum: description: hash of the path returned: success, path exists and user can read stats and path supports hashing - type: boolean - sample: True + type: string + sample: 50ba294cdf28c0d5bcde25708df53346825a429f pw_name: description: User name of owner returned: success, path exists and user can read stats and installed python supports it diff --git a/files/synchronize.py b/files/synchronize.py index 2fb93e4cad3..ff58f9c1032 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -152,6 +152,18 @@ options: default: required: false version_added: "1.6" + partial: + description: + - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. + default: no + required: false + version_added: "2.0" + verify_host: + description: + - Verify destination host key. + default: no + required: false + version_added: "2.0" notes: - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path @@ -163,7 +175,7 @@ notes: C(.rsync-filter) files to the source directory. -author: Timothy Appnel +author: "Timothy Appnel (@tima)" ''' EXAMPLES = ''' @@ -237,6 +249,8 @@ def main(): rsync_timeout = dict(type='int', default=0), rsync_opts = dict(type='list'), ssh_args = dict(type='str'), + partial = dict(default='no', type='bool'), + verify_host = dict(default='no', type='bool'), ), supports_check_mode = True ) @@ -254,6 +268,7 @@ def main(): compress = module.params['compress'] existing_only = module.params['existing_only'] dirs = module.params['dirs'] + partial = module.params['partial'] # the default of these params depends on the value of archive recursive = module.params['recursive'] links = module.params['links'] @@ -264,6 +279,7 @@ def main(): group = module.params['group'] rsync_opts = module.params['rsync_opts'] ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] cmd = '%s --delay-updates -F' % rsync if compress: @@ -316,10 +332,13 @@ def main(): else: private_key = '-i '+ private_key + ssh_opts = '-S none' + + if not verify_host: + ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts + if ssh_args: - ssh_opts = '-S none -o StrictHostKeyChecking=no %s' % ssh_args - else: - ssh_opts = '-S none -o StrictHostKeyChecking=no' + ssh_opts = '%s %s' % (ssh_opts, ssh_args) if dest_port != 22: cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) @@ -332,6 +351,9 @@ def main(): if rsync_opts: cmd = cmd + " " + " ".join(rsync_opts) + if partial: + cmd = cmd + " --partial" + changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" diff --git a/files/template.py b/files/template.py index 7ba072fcdc1..120917f49c2 100644 --- a/files/template.py +++ b/files/template.py @@ -38,20 +38,22 @@ options: required: false choices: [ "yes", "no" ] default: "no" - validate: + force: description: - - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. - - validation to run before copying into place. 
The command is passed - securely so shell features like expansion and pipes won't work. + - the default is C(yes), which will replace the remote file when contents + are different than the source. If C(no), the file will only be transferred + if the destination does not exist. required: false - default: "" - version_added: "1.2" + choices: [ "yes", "no" ] + default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." requirements: [] -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan extends_documentation_fragment: files +extends_documentation_fragment: validate ''' EXAMPLES = ''' diff --git a/files/unarchive.py b/files/unarchive.py index 625989ffdfb..2b373a8e7fb 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -32,6 +32,7 @@ options: src: description: - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. + - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0) required: true default: null dest: @@ -58,7 +59,7 @@ options: choices: [ "yes", "no" ] default: "no" version_added: "2.0" -author: Dylan Martin +author: "Dylan Martin (@pileofrogs)" todo: - detect changed/unchanged for .zip files - handle common unarchive args, like preserve owner/timestamp etc... @@ -81,6 +82,9 @@ EXAMPLES = ''' # Unarchive a file that is already on the remote machine - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no + +# Unarchive a file that needs to be downloaded (added in 2.0) +- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no ''' import re @@ -90,6 +94,9 @@ from zipfile import ZipFile # String from tar that shows the tar contents are different from the # filesystem DIFFERENCE_RE = re.compile(r': (.*) differs$') +# When downloading an archive, how much of the archive to download before +# saving to a tempfile (64k) +BUFSIZE = 65536 class UnarchiveError(Exception): pass @@ -269,11 +276,37 @@ def main(): if not os.path.exists(src): if copy: module.fail_json(msg="Source '%s' failed to transfer" % src) + # If copy=false, and src= contains ://, try to download the file to a temp directory. + elif '://' in src: + tempdir = os.path.dirname(__file__) + package = os.path.join(tempdir, str(src.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, src) + f = open(package, 'w') + # Read BUFSIZE (64kb) at a time to save on ram + while True: + data = rsp.read(BUFSIZE) + + if data == "": + break # End of file, break while loop + + f.write(data) + f.close() + src = package + except Exception, e: + module.fail_json(msg="Failure downloading %s, %s" % (src, e)) else: module.fail_json(msg="Source '%s' does not exist" % src) if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) + # skip working with 0 size archives + try: + if os.path.getsize(src) == 0: + module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src) + except Exception, e: + module.fail_json(msg="Source '%s' not readable" % src) + # is dest OK to receive tar file?
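The unarchive download loop above streams the response in BUFSIZE chunks rather than reading the whole archive into memory, which is why a large tarball does not balloon the module's footprint. The same loop factored out (rsp stands for any file-like response object, such as the one fetch_url returns):

```python
BUFSIZE = 65536  # 64kb per read, matching the constant above

def stream_to_file(rsp, dest_path):
    f = open(dest_path, 'w')
    try:
        while True:
            data = rsp.read(BUFSIZE)
            if data == "":
                break          # end of stream
            f.write(data)
    finally:
        f.close()              # closed even if a read fails mid-transfer
```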
if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) @@ -315,5 +348,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * if __name__ == '__main__': main() diff --git a/files/xattr.py b/files/xattr.py index 94115ae3b51..5e67e5e03a6 100644 --- a/files/xattr.py +++ b/files/xattr.py @@ -58,7 +58,7 @@ options: - if yes, dereferences symlinks and sets/gets attributes on symlink target, otherwise acts on symlink itself. -author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' diff --git a/inventory/add_host.py b/inventory/add_host.py index b28c6f90182..2ab76b4c16a 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -22,7 +22,9 @@ options: notes: - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it to iterate use a with\_ directive. -author: Seth Vidal +author: + - "Ansible Core Team" + - "Seth Vidal" ''' EXAMPLES = ''' diff --git a/inventory/group_by.py b/inventory/group_by.py index d09552e662c..f63bdf5912b 100644 --- a/inventory/group_by.py +++ b/inventory/group_by.py @@ -12,7 +12,7 @@ options: description: - The variables whose values will be used as groups required: true -author: Jeroen Hoekx +author: "Jeroen Hoekx (@jhoekx)" notes: - Spaces in group names are converted to dashes '-'. ''' diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 2bf37e3b129..01479260277 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -38,6 +38,8 @@ description: (see `setting the environment `_), or by using the use_proxy option. + - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that + your proxy environment for both protocols is correct. version_added: "0.6" options: url: @@ -96,6 +98,12 @@ options: required: false default: 10 version_added: '1.8' + headers: + description: + - Add custom HTTP headers to a request in the format 'key:value,key:value' + required: false + default: null + version_added: '2.0' url_username: description: - The username for use in HTTP basic authentication. This parameter can be used @@ -108,13 +116,23 @@ options: parameter is not specified, the C(url_password) parameter will not be used. required: false version_added: '1.6' + force_basic_auth: + version_added: '2.0' + description: + - httplib2, the library used by the uri module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly + send a 401, logins will fail. This option forces the sending of the Basic authentication header + upon initial request. 
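On the force_basic_auth option just described: the preemptive header is nothing more than base64 over user:password, sent on the first request instead of waiting for a 401 challenge that some services never issue. A sketch (credentials invented):

```python
import base64

def basic_auth_header(username, password):
    # What a forced Basic auth login sends up front.
    token = base64.b64encode(('%s:%s' % (username, password)).encode()).decode()
    return {'Authorization': 'Basic %s' % token}

print(basic_auth_header('deploy', 's3cret'))
# {'Authorization': 'Basic ZGVwbG95OnMzY3JldA=='}
```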
+ required: false + choices: [ "yes", "no" ] + default: "no" others: description: - all arguments accepted by the M(file) module also work here required: false # informational: requirements for nodes -requirements: [ urllib2, urlparse ] -author: Jan-Piet Mens +requirements: [ ] +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES=''' @@ -123,8 +141,16 @@ EXAMPLES=''' - name: download file with sha256 check get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c + +- name: download file and force basic auth + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes + +- name: download file with custom HTTP headers + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers='key:value,key:value' ''' +import urlparse + try: import hashlib HAS_HASHLIB=True @@ -140,14 +166,14 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): +def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None): """ Download data from the url and store in a temporary file. Return (tempfile, info about the request) """ - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -197,6 +223,7 @@ def main(): dest = dict(required=True), sha256sum = dict(default=''), timeout = dict(required=False, type='int', default=10), + headers = dict(required=False, default=None), ) module = AnsibleModule( @@ -211,12 +238,42 @@ def main(): sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] + + # Parse headers to dict + if module.params['headers']: + try: + headers = dict(item.split(':') for item in module.params['headers'].split(',')) + except: + module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.") + else: + headers = None dest_is_dir = os.path.isdir(dest) last_mod_time = None + # Remove any non-alphanumeric characters, including the infamous + # Unicode zero-width space + stripped_sha256sum = re.sub(r'\W+', '', sha256sum) + + # Fail early if sha256 is not supported + if sha256sum != '' and not HAS_HASHLIB: + module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") + if not dest_is_dir and os.path.exists(dest): - if not force: + checksum_mismatch = False + + # If the download is not forced and there is a checksum, allow + # checksum match to skip the download.
+ if not force and sha256sum != '': + destination_checksum = module.sha256(dest) + + if stripped_sha256sum.lower() == destination_checksum: + module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) + + checksum_mismatch = True + + # Not forcing redownload, unless sha256sum has already failed + if not force and not checksum_mismatch: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) # If the file already exists, prepare the last modified time for the @@ -225,7 +282,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers) # Now the request has completed, we can finally generate the final # destination file name from the info dict. @@ -279,15 +336,7 @@ def main(): # Check the digest of the destination file and ensure that it matches the # sha256sum parameter if it is present if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) + destination_checksum = module.sha256(dest) if stripped_sha256sum.lower() != destination_checksum: os.remove(dest) @@ -315,4 +364,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/network/basics/slurp.py b/network/basics/slurp.py index a2130c354b2..f96434f5fd3 100644 --- a/network/basics/slurp.py +++ b/network/basics/slurp.py @@ -37,7 +37,9 @@ options: notes: - "See also: M(fetch)" requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/network/basics/uri.py b/network/basics/uri.py index ce2cc888779..3babba6d609 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -20,6 +20,7 @@ # # see examples/playbooks/uri.yml +import cgi import shutil import tempfile import base64 @@ -70,11 +71,12 @@ options: required: false choices: [ "raw", "json" ] default: raw + version_added: "2.0" method: description: - The HTTP method of the request or response. required: false - choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ] + choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ] default: "GET" return_content: description: @@ -131,10 +133,19 @@ options: description: - all arguments accepted by the M(file) module also work here required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + be set to C(no) on personally controlled sites using self-signed + certificates. Prior to 1.9.2 the code defaulted to C(no).
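The reworked get_url flow above makes sha256sum do double duty: it still verifies the finished download, but a matching checksum on an existing dest now skips the transfer entirely. The decision logic in isolation (file handling simplified; hashlib only):

```python
import hashlib
import os
import re

def should_download(dest, sha256sum, force):
    if force or not os.path.exists(dest):
        return True
    if sha256sum:
        # Strip non-alphanumerics, e.g. a pasted zero-width space.
        wanted = re.sub(r'\W+', '', sha256sum).lower()
        with open(dest, 'rb') as f:
            have = hashlib.sha256(f.read()).hexdigest()
        return wanted != have   # mismatch forces a re-download
    return False                # exists and nothing to verify: keep it
```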
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: '1.9.2' # informational: requirements for nodes requirements: [ urlparse, httplib2 ] -author: Romeo Theriault +author: "Romeo Theriault (@romeotheriault)" ''' EXAMPLES = ''' @@ -146,31 +157,45 @@ EXAMPLES = ''' register: webpage - action: fail - when: 'AWESOME' not in "{{ webpage.content }}" + when: "'illustrative' not in webpage.content" # Create a JIRA issue - -- uri: url=https://your.jira.example.com/rest/api/2/issue/ - method=POST user=your_username password=your_pass - body="{{ lookup('file','issue.json') }}" force_basic_auth=yes - status_code=201 HEADER_Content-Type="application/json" +- uri: + url: https://your.jira.example.com/rest/api/2/issue/ + method: POST + user: your_username + password: your_pass + body: "{{ lookup('file','issue.json') }}" + force_basic_auth: yes + status_code: 201 + body_format: json # Login to a form based webpage, then use the returned cookie to # access the app in later tasks -- uri: url=https://your.form.based.auth.examle.com/index.php - method=POST body="name=your_username&password=your_password&enter=Sign%20in" - status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded" +- uri: + url: https://your.form.based.auth.examle.com/index.php + method: POST + body: "name=your_username&password=your_password&enter=Sign%20in" + status_code: 302 + HEADER_Content-Type: "application/x-www-form-urlencoded" register: login -- uri: url=https://your.form.based.auth.example.com/dashboard.php - method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}" - -# Queue build of a project in Jenkins: +- uri: + url: https://your.form.based.auth.example.com/dashboard.php + method: GET + return_content: yes + HEADER_Cookie: "{{login.set_cookie}}" -- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} - method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 +# Queue build of a project in Jenkins: +- uri: + url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}" + method: GET + user: "{{ jenkins.user }}" + password: "{{ jenkins.password }}" + force_basic_auth: yes + status_code: 201 ''' @@ -188,7 +213,6 @@ try: except ImportError: HAS_URLPARSE = False - def write_file(module, url, dest, content): # create a tempfile with some test content fd, tmpsrc = tempfile.mkstemp() @@ -244,9 +268,9 @@ def url_filename(url): return fn -def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout): +def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): # To debug - #httplib2.debug = 4 + #httplib2.debuglevel = 4 # Handle Redirects if redirects == "all" or redirects == "yes": @@ -260,7 +284,8 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r follow_all_redirects = False # Create a Http object and set some default options. 
- h = httplib2.Http(disable_ssl_certificate_validation=True, timeout=socket_timeout) + disable_validation = not validate_certs + h = httplib2.Http(disable_ssl_certificate_validation=disable_validation, timeout=socket_timeout) h.follow_all_redirects = follow_all_redirects h.follow_redirects = follow_redirects h.forward_authorization_headers = True @@ -309,10 +334,7 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r r['redirected'] = redirected r.update(resp_redir) r.update(resp) - try: - return r, unicode(content.decode('raw_unicode_escape')), dest - except: - return r, content, dest + return r, content, dest except httplib2.RedirectMissingLocation: module.fail_json(msg="A 3xx redirect response code was provided but no Location: header was provided to point to the new location.") except httplib2.RedirectLimit: @@ -329,6 +351,10 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") except httplib2.UnimplementedHmacDigestAuthOptionError: module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") + except httplib2.CertificateHostnameMismatch: + module.fail_json(msg="The server's certificate does not match with its hostname.") + except httplib2.SSLHandshakeError: + module.fail_json(msg="Unable to validate server's certificate against available CA certs.") except socket.error, e: module.fail_json(msg="Socket error: %s to %s" % (e, url)) @@ -342,7 +368,7 @@ def main(): password = dict(required=False, default=None), body = dict(required=False, default=None), body_format = dict(required=False, default='raw', choices=['raw', 'json']), - method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), + method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']), return_content = dict(required=False, default='no', type='bool'), force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), @@ -350,6 +376,7 @@ def main(): removes = dict(required=False, default=None), status_code = dict(required=False, default=[200], type='list'), timeout = dict(required=False, default=30, type='int'), + validate_certs = dict(required=False, default=True, type='bool'), ), check_invalid_arguments=False, add_file_common_args=True @@ -374,6 +401,7 @@ def main(): removes = module.params['removes'] status_code = [int(x) for x in list(module.params['status_code'])] socket_timeout = module.params['timeout'] + validate_certs = module.params['validate_certs'] dict_headers = {} @@ -415,7 +443,7 @@ def main(): # Make the request - resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout) + resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) # Write the file out if requested @@ -440,22 +468,32 @@ def main(): ukey = key.replace("-", "_") uresp[ukey] = value + # Default content_encoding to try + content_encoding = 'utf-8' if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json') or \ - uresp['content_type'].startswith('text/json'): + content_type, 
params = cgi.parse_header(uresp['content_type']) + if 'charset' in params: + content_encoding = params['charset'] + u_content = unicode(content, content_encoding, errors='xmlcharrefreplace') + if content_type.startswith('application/json') or \ + content_type.startswith('text/json'): try: - js = json.loads(content) + js = json.loads(u_content) uresp['json'] = js except: pass + else: + u_content = unicode(content, content_encoding, errors='xmlcharrefreplace') + if resp['status'] not in status_code: - module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) + module.fail_json(msg="Status code was not " + str(status_code), content=u_content, **uresp) elif return_content: - module.exit_json(changed=changed, content=content, **uresp) + module.exit_json(changed=changed, content=u_content, **uresp) else: module.exit_json(changed=changed, **uresp) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 889a81f025a..017f6b818a6 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -70,6 +70,13 @@ options: version_added: "1.3" required: false default: null + state: + version_added: "2.0" + description: + - The desired state of the library. C(latest) ensures that the latest version is installed. + required: false + choices: [present, latest] + default: present notes: - Please note that the M(easy_install) module can only install Python libraries. Thus this module is not able to remove libraries. It is @@ -78,19 +85,20 @@ notes: - Also note that I(virtualenv) must be installed on the remote host if the C(virtualenv) parameter is specified. requirements: [ "virtualenv" ] -author: Matt Wright +author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' # Examples from Ansible Playbooks -- easy_install: name=pip +- easy_install: name=pip state=latest # Install Bottle into the specified virtualenv. 
- easy_install: name=bottle virtualenv=/webapps/myapp/venv ''' -def _is_package_installed(module, name, easy_install): - cmd = '%s --dry-run %s' % (easy_install, name) +def _is_package_installed(module, name, easy_install, executable_arguments): + executable_arguments = executable_arguments + ['--dry-run'] + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) rc, status_stdout, status_stderr = module.run_command(cmd) return not ('Reading' in status_stdout or 'Downloading' in status_stdout) @@ -124,6 +132,10 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), + state=dict(required=False, + default='present', + choices=['present','latest'], + type='str'), virtualenv=dict(default=None, required=False), virtualenv_site_packages=dict(default='no', type='bool'), virtualenv_command=dict(default='virtualenv', required=False), @@ -137,6 +149,9 @@ def main(): executable = module.params['executable'] site_packages = module.params['virtualenv_site_packages'] virtualenv_command = module.params['virtualenv_command'] + executable_arguments = [] + if module.params['state'] == 'latest': + executable_arguments.append('--upgrade') rc = 0 err = '' @@ -162,12 +177,12 @@ def main(): cmd = None changed = False - installed = _is_package_installed(module, name, easy_install) + installed = _is_package_installed(module, name, easy_install, executable_arguments) if not installed: if module.check_mode: module.exit_json(changed=True) - cmd = '%s %s' % (easy_install, name) + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd) rc += rc_easy_inst diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 3740a3e7ce3..d058193624a 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -73,7 +73,20 @@ options: required: false default: "no" version_added: "1.6" -author: Johan Wiren + include_doc: + description: + - Install with or without docs. 
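In easy_install.py above, state=latest works by threading --upgrade through both the --dry-run probe and the real install, so "already installed" is judged against the newest available version. How the command line comes together (the binary path and package are illustrative):

```python
def easy_install_cmd(easy_install, name, state, dry_run=False):
    args = [easy_install]
    if state == 'latest':
        args.append('--upgrade')   # applied to probe and install alike
    if dry_run:
        args.append('--dry-run')
    args.append(name)
    return ' '.join(args)

print(easy_install_cmd('/usr/bin/easy_install', 'bottle', 'latest', dry_run=True))
# /usr/bin/easy_install --upgrade --dry-run bottle
```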
+ required: false + default: "no" + version_added: "2.0" + build_flags: + description: + - Allow adding build flags for gem compilation + required: false + version_added: "2.0" +author: + - "Ansible Core Team" + - "Johan Wiren" ''' EXAMPLES = ''' @@ -182,9 +195,12 @@ def install(module): cmd.append('--no-user-install') if module.params['pre_release']: cmd.append('--pre') - cmd.append('--no-rdoc') - cmd.append('--no-ri') + if not module.params['include_doc']: + cmd.append('--no-rdoc') + cmd.append('--no-ri') cmd.append(module.params['gem_source']) + if module.params['build_flags']: + cmd.extend([ '--', module.params['build_flags'] ]) module.run_command(cmd, check_rc=True) def main(): @@ -198,8 +214,10 @@ def main(): repository = dict(required=False, aliases=['source'], type='str'), state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), - pre_release = dict(required=False, default=False, type='bool'), + pre_release = dict(required=False, default=False, type='bool'), + include_doc = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), + build_flags = dict(required=False, type='str'), ), supports_check_mode = True, mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ], diff --git a/packaging/language/pip.py b/packaging/language/pip.py index a0c70c1a187..8bbae35038d 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -63,13 +63,21 @@ options: default: "no" choices: [ "yes", "no" ] virtualenv_command: - version_aded: "1.1" + version_added: "1.1" description: - The command or a pathname to the command to create the virtual environment with. For example C(pyvenv), C(virtualenv), C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). required: false default: virtualenv + virtualenv_python: + version_added: "2.0" + description: + - The Python executable used for creating the virtual environment. + For example C(python3.4), C(python2.7). When not specified, the + system Python version is used. + required: false + default: null state: description: - The state of module @@ -100,7 +108,7 @@ options: notes: - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. 
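The gem.py changes above make documentation generation opt-in and pass build_flags through gem's -- separator to the native extension build. A sketch of the resulting argv (package and flags invented):

```python
def gem_install_argv(gem_source, include_doc=False, build_flags=None):
    cmd = ['gem', 'install']
    if not include_doc:
        cmd.extend(['--no-rdoc', '--no-ri'])   # previously unconditional
    cmd.append(gem_source)
    if build_flags:
        cmd.extend(['--', build_flags])        # handed to the native build
    return cmd

print(gem_install_argv('nokogiri', build_flags='--use-system-libraries'))
# ['gem', 'install', '--no-rdoc', '--no-ri', 'nokogiri', '--', '--use-system-libraries']
```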
requirements: [ "virtualenv", "pip" ] -author: Matt Wright +author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' @@ -147,7 +155,7 @@ def _get_cmd_options(module, cmd): words = stdout.strip().split() cmd_options = [ x for x in words if x.startswith('--') ] return cmd_options - + def _get_full_name(name, version=None): if version is None: @@ -228,6 +236,7 @@ def main(): virtualenv=dict(default=None, required=False), virtualenv_site_packages=dict(default='no', type='bool'), virtualenv_command=dict(default='virtualenv', required=False), + virtualenv_python=dict(default=None, required=False, type='str'), use_mirrors=dict(default='yes', type='bool'), extra_args=dict(default=None, required=False), chdir=dict(default=None, required=False), @@ -243,6 +252,7 @@ def main(): version = module.params['version'] requirements = module.params['requirements'] extra_args = module.params['extra_args'] + virtualenv_python = module.params['virtualenv_python'] chdir = module.params['chdir'] if state == 'latest' and version is not None: @@ -260,18 +270,21 @@ def main(): if module.check_mode: module.exit_json(changed=True) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) + cmd = os.path.expanduser(virtualenv_command) + if os.path.basename(cmd) == cmd: + cmd = module.get_bin_path(virtualenv_command, True) if module.params['virtualenv_site_packages']: - cmd = '%s --system-site-packages %s' % (virtualenv, env) + cmd += ' --system-site-packages' else: - cmd_opts = _get_cmd_options(module, virtualenv) + cmd_opts = _get_cmd_options(module, cmd) if '--no-site-packages' in cmd_opts: - cmd = '%s --no-site-packages %s' % (virtualenv, env) - else: - cmd = '%s %s' % (virtualenv, env) + cmd += ' --no-site-packages' + + if virtualenv_python: + cmd += ' -p%s' % virtualenv_python + + cmd = "%s %s" % (cmd, env) this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) @@ -286,14 +299,14 @@ def main(): cmd = '%s %s' % (pip, state_map[state]) # If there's a virtualenv we want things we install to be able to use other - # installations that exist as binaries within this virtualenv. Example: we - # install cython and then gevent -- gevent needs to use the cython binary, - # not just a python package that will be found by calling the right python. + # installations that exist as binaries within this virtualenv. Example: we + # install cython and then gevent -- gevent needs to use the cython binary, + # not just a python package that will be found by calling the right python. # So if there's a virtualenv, we add that bin/ to the beginning of the PATH # in run_command by setting path_prefix here. path_prefix = None if env: - path_prefix="/".join(pip.split('/')[:-1]) + path_prefix = "/".join(pip.split('/')[:-1]) # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ @@ -320,7 +333,7 @@ def main(): this_dir = os.path.join(this_dir, chdir) if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: + if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): @@ -343,7 +356,8 @@ def main(): rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) out += out_pip err += err_pip - if rc == 1 and state == 'absent' and 'not installed' in out_pip: + if rc == 1 and state == 'absent' and \ + ('not installed' in out_pip or 'not installed' in err_pip): pass # rc is 1 when attempting to uninstall non-installed package elif rc != 0: _fail(module, cmd, out, err) @@ -354,7 +368,8 @@ def main(): changed = 'Successfully installed' in out_pip module.exit_json(changed=changed, cmd=cmd, name=name, version=version, - state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) + state=state, requirements=requirements, virtualenv=env, + stdout=out, stderr=err) # import module snippets from ansible.module_utils.basic import * diff --git a/packaging/os/apt.py b/packaging/os/apt.py index a160c13c311..19a7c426f5e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -80,8 +80,8 @@ options: - 'Note: This does not upgrade a specific package, use state=latest for that.' version_added: "1.1" required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] + default: "no" + choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -94,7 +94,7 @@ options: required: false version_added: "1.6" requirements: [ python-apt, aptitude ] -author: Matthew Williams +author: "Matthew Williams (@mgwilliams)" notes: - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise C(apt-get) suffices. @@ -138,6 +138,28 @@ EXAMPLES = ''' - apt: pkg=foo state=build-dep ''' +RETURN = ''' +cache_updated: + description: if the cache was updated or not + returned: success, in some cases + type: boolean + sample: True +cache_update_time: + description: time of the last cache update (0 if unknown) + returned: success, in some cases + type: datetime + sample: 1425828348000 +stdout: + description: output from apt + returned: success, when needed + type: string + sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..." +stderr: + description: error output from apt + returned: success, when needed + type: string + sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." 
+''' import traceback # added to stave off future warnings about apt api @@ -206,8 +228,16 @@ def package_status(m, pkgname, version, cache, state): except KeyError: if state == 'install': try: - if cache.get_providing_packages(pkgname): - return False, True, False + provided_packages = cache.get_providing_packages(pkgname) + if provided_packages: + is_installed = False + # when virtual package providing only one package, look up status of target package + if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: + package = provided_packages[0] + installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') + if installed: + is_installed = True + return is_installed, True, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages @@ -518,7 +548,7 @@ def main(): default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), + upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], @@ -542,9 +572,15 @@ def main(): APT_GET_CMD = module.get_bin_path("apt-get") p = module.params + + if p['upgrade'] == 'no': + p['upgrade'] = None + if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") + updated_cache = False + updated_cache_time = 0 install_recommends = p['install_recommends'] dpkg_options = expand_dpkg_options(p['dpkg_options']) @@ -567,41 +603,41 @@ def main(): if p['update_cache']: # Default is: always update the cache cache_valid = False - if p['cache_valid_time']: - tdelta = datetime.timedelta(seconds=p['cache_valid_time']) + now = datetime.datetime.now() + if p.get('cache_valid_time', False): try: mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime except: - mtime = False - if mtime is False: # Looks like the update-success-stamp is not available # Fallback: Checking the mtime of the lists try: mtime = os.stat(APT_LISTS_PATH).st_mtime except: + # No mtime could be read. 
We update the cache to be safe mtime = False - if mtime is False: - # No mtime could be read - looks like lists are not there - # We update the cache to be safe - cache_valid = False - else: + + if mtime: + tdelta = datetime.timedelta(seconds=p['cache_valid_time']) mtimestamp = datetime.datetime.fromtimestamp(mtime) - if mtimestamp + tdelta >= datetime.datetime.now(): - # dont update the cache - # the old cache is less than cache_valid_time seconds old - so still valid + if mtimestamp + tdelta >= now: cache_valid = True + updated_cache_time = int(time.mktime(mtimestamp.timetuple())) if cache_valid is not True: cache.update() cache.open(progress=None) + updated_cache = True + updated_cache_time = int(time.mktime(now.timetuple())) if not p['package'] and not p['upgrade'] and not p['deb']: - module.exit_json(changed=False) + module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time) + else: + updated_cache = False + updated_cache_time = 0 force_yes = p['force'] if p['upgrade']: - upgrade(module, p['upgrade'], force_yes, - p['default_release'], dpkg_options) + upgrade(module, p['upgrade'], force_yes, p['default_release'], dpkg_options) if p['deb']: if p['state'] != 'present': @@ -631,6 +667,8 @@ def main(): force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep) (success, retvals) = result + retvals['cache_updated']=updated_cache + retvals['cache_update_time']=updated_cache_time if success: module.exit_json(**retvals) else: diff --git a/packaging/os/apt_key.py b/packaging/os/apt_key.py index 2967646feff..d41664f121a 100644 --- a/packaging/os/apt_key.py +++ b/packaging/os/apt_key.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: apt_key -author: Jayson Vantuyl & others +author: "Jayson Vantuyl & others (@jvantuyl)" version_added: "1.0" short_description: Add or remove an apt key description: diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 5153699c8bf..8f6d18d09d5 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] -author: Alexander Saltanov +author: "Alexander Saltanov (@sashka)" version_added: "0.7" requirements: [ python-apt ] ''' @@ -126,6 +126,8 @@ class InvalidSource(Exception): class SourcesList(object): def __init__(self): self.files = {} # group sources by file + # Repositories that we're adding -- used to implement mode param + self.new_repos = set() self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') # read sources.list if it exists @@ -238,10 +240,6 @@ class SourcesList(object): d, fn = os.path.split(filename) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(tmp_path, this_mode, False) - f = os.fdopen(fd, 'w') for n, valid, enabled, source, comment in sources: chunks = [] @@ -259,6 +257,11 @@ class SourcesList(object): except IOError, err: module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) module.atomic_move(tmp_path, filename) + + # allow the user to override the default mode + if filename in self.new_repos: + this_mode = module.params['mode'] + module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): @@ -300,6 +303,7 @@ class SourcesList(object): files = self.files[file] files.append((len(files), True, True, source_new, comment_new)) + self.new_repos.add(file) 
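The cache_valid_time handling in the apt hunk above boils down to one mtime comparison; a minimal sketch under the same assumptions (the update-success stamp is consulted first, the lists directory is the fallback, and no readable mtime means update to be safe):

    import datetime
    import os

    def cache_is_valid(valid_seconds,
                       stamp='/var/lib/apt/periodic/update-success-stamp',
                       lists='/var/lib/apt/lists'):
        # Sketch only: paths and semantics follow the hunk above.
        now = datetime.datetime.now()
        for path in (stamp, lists):
            try:
                mtime = os.stat(path).st_mtime
            except OSError:
                continue  # stamp missing: fall back to the lists directory
            age = now - datetime.datetime.fromtimestamp(mtime)
            return age <= datetime.timedelta(seconds=valid_seconds)
        return False  # no mtime readable; update the cache to be safe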
def add_source(self, line, comment='', file=None): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] @@ -374,6 +378,25 @@ class UbuntuSourcesList(SourcesList): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] self._remove_valid_source(source) + @property + def repos_urls(self): + _repositories = [] + for parsed_repos in self.files.values(): + for parsed_repo in parsed_repos: + enabled = parsed_repo[1] + source_line = parsed_repo[3] + + if not enabled: + continue + + if source_line.startswith('ppa:'): + source, ppa_owner, ppa_name = self._expand_ppa(source_line) + _repositories.append(source) + else: + _repositories.append(source_line) + + return _repositories + def get_add_ppa_signing_key_callback(module): def _run_command(command): @@ -421,8 +444,13 @@ def main(): sources_before = sourceslist.dump() + if repo.startswith('ppa:'): + expanded_repo = sourceslist._expand_ppa(repo)[0] + else: + expanded_repo = repo + try: - if state == 'present': + if state == 'present' and expanded_repo not in sourceslist.repos_urls: sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) diff --git a/packaging/os/apt_rpm.py b/packaging/os/apt_rpm.py index a85c528a239..fec220e0512 100644 --- a/packaging/os/apt_rpm.py +++ b/packaging/os/apt_rpm.py @@ -44,7 +44,7 @@ options: required: false default: no choices: [ "yes", "no" ] -author: Evgenii Terechkov +author: "Evgenii Terechkov (@evgkrsk)" notes: [] ''' diff --git a/packaging/os/package.py b/packaging/os/package.py new file mode 100644 index 00000000000..288ca83a772 --- /dev/null +++ b/packaging/os/package.py @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Ansible, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +DOCUMENTATION = ''' +--- +module: package +version_added: 2.0 +author: + - Ansible Inc +maintainers: + - Ansible Core Team +short_description: Generic OS package manager +description: + - Installs, upgrades, and removes packages using the underlying OS package manager. +options: + name: + description: + - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages." + required: true + state: + description: + - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. + required: true + use: + description: + - The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it. + required: false + default: auto +requirements: + - Whatever is required for the package plugins specific for each system. +notes: + - This module actually calls the pertinent package modules for each system (apt, yum, etc).
+''' +EXAMPLES = ''' +- name: install the latest version of ntpdate + package: name=ntpdate state=latest + +# This uses a variable as this changes per distribution. +- name: remove the apache package + package: name={{apache}} state=absent +''' diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 4248f3923a9..1cfd8fc25a6 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -7,7 +7,7 @@ short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command description: - Manage registration and subscription to the Red Hat Network entitlement platform. version_added: "1.2" -author: James Laska +author: "James Laska (@jlaska)" notes: - In order to register a system, subscription-manager requires either a username and password, or an activationkey. requirements: diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py index 42d61f36e66..0071183158e 100644 --- a/packaging/os/rhn_channel.py +++ b/packaging/os/rhn_channel.py @@ -24,7 +24,7 @@ short_description: Adds or removes Red Hat software channels description: - Adds or removes Red Hat software channels version_added: "1.1" -author: Vincent Van der Kussen +author: "Vincent Van der Kussen (@vincentvdk)" notes: - this module fetches the system id from RHN. requirements: diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 1e92405c827..b67b442aa22 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -56,6 +56,12 @@ options: - supply an activation key for use with registration required: False default: null + profilename: + description: + - supply a profile name for use with registration + required: False + default: null + version_added: "2.0" channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. @@ -73,6 +79,9 @@ EXAMPLES = ''' # Register with activationkey (1-222333444) and enable extended update support. - rhn_register: state=present activationkey=1-222333444 enable_eus=true +# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname. +- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom + # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). - rhn_register: > @@ -209,7 +218,7 @@ class Rhn(RegistrationBase): self.update_plugin_conf('rhnplugin', True) self.update_plugin_conf('subscription-manager', False) - def register(self, enable_eus=False, activationkey=None): + def register(self, enable_eus=False, activationkey=None, profilename=None): ''' Register system to RHN. If enable_eus=True, extended update support will be requested.
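The register() change above only threads one more optional flag into the command line; a hedged sketch of the assembly (flag names as in the hunk, the helper itself is illustrative):

    def build_register_cmd(enable_eus=False, activationkey=None, profilename=None):
        # Sketch only: each optional parameter maps to one rhnreg_ks flag.
        cmd = '/usr/sbin/rhnreg_ks'
        if enable_eus:
            cmd += ' --use-eus-channel'
        if activationkey is not None:
            cmd += " --activationkey '%s'" % activationkey
        if profilename is not None:
            cmd += " --profilename '%s'" % profilename
        return cmd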
@@ -221,7 +230,8 @@ class Rhn(RegistrationBase): register_cmd += " --use-eus-channel" if activationkey is not None: register_cmd += " --activationkey '%s'" % activationkey - # FIXME - support --profilename + if profilename is not None: + register_cmd += " --profilename '%s'" % profilename # FIXME - support --systemorgid rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) @@ -285,6 +295,7 @@ def main(): password = dict(default=None, required=False), server_url = dict(default=rhn.config.get_option('serverURL'), required=False), activationkey = dict(default=None, required=False), + profilename = dict(default=None, required=False), enable_eus = dict(default=False, type='bool'), channels = dict(default=[], type='list'), ) @@ -295,6 +306,7 @@ def main(): rhn.password = module.params['password'] rhn.configure(module.params['server_url']) activationkey = module.params['activationkey'] + profilename = module.params['profilename'] channels = module.params['channels'] rhn.module = module diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index f132d552506..d2d5e684015 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rpm_key -author: Hector Acosta +author: "Hector Acosta (@hacosta) " short_description: Adds or removes a gpg key from the rpm db description: - Adds or removes (rpm --import) a gpg key to your rpm database. @@ -60,9 +60,10 @@ EXAMPLES = ''' # Example action to ensure a key is not present in the db - rpm_key: state=absent key=DEADB33F ''' +import re import syslog import os.path -import re +import urllib2 import tempfile def is_pubkey(string): @@ -203,4 +204,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/yum.py b/packaging/os/yum.py index a8b996c84de..29d6b0100dc 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -26,6 +26,11 @@ import traceback import os import yum import rpm +import syslog +import platform +import tempfile +import shutil +from distutils.version import LooseVersion try: from yum.misc import find_unfinished_transactions, find_ts_remaining @@ -113,10 +118,24 @@ options: choices: ["yes", "no"] aliases: [] -notes: [] +notes: + - When used with a loop of package names in a playbook, ansible optimizes + the call to the yum module. Instead of calling the module with a single + package each time through the loop, ansible calls the module once with all + of the package names from the loop. + - In versions prior to 1.9.2 this module installed and removed each package + given to the yum module separately. This caused problems when packages + specified by filename or url had to be installed or removed together. In + 1.9.2 this was fixed so that packages are installed in one yum + transaction. However, if one of the packages adds a new yum repository + that the other packages come from (such as epel-release) then that package + needs to be installed in a separate task. This mimics yum's command line + behaviour. # informational: requirements for nodes requirements: [ yum ] -author: Seth Vidal +author: + - "Ansible Core Team" + - "Seth Vidal" ''' EXAMPLES = ''' @@ -145,16 +164,11 @@ EXAMPLES = ''' yum: name="@Development tools" state=present ''' +# 64k. 
Number of bytes to read at a time when manually downloading pkgs via a url +BUFSIZE = 65536 + def_qf = "%{name}-%{version}-%{release}.%{arch}" -repoquery='/usr/bin/repoquery' -if not os.path.exists(repoquery): - repoquery = None - -yumbin='/usr/bin/yum' - -import syslog - def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) @@ -166,18 +180,27 @@ def yum_base(conf_file=None): my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file + if os.geteuid() != 0: + if hasattr(my, 'setCacheDir'): + my.setCacheDir() + else: + cachedir = yum.misc.getCacheDir() + my.repos.setCacheDir(cachedir) + my.conf.cache = 0 + return my -def install_yum_utils(module): +def ensure_yum_utils(module): - if not module.check_mode: + repoquerybin = module.get_bin_path('repoquery', required=False) + + if module.params['install_repoquery'] and not repoquerybin and not module.check_mode: yum_path = module.get_bin_path('yum') if yum_path: - rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) - if rc == 0: - this_path = module.get_bin_path('repoquery') - global repoquery - repoquery = this_path + rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) + repoquerybin = module.get_bin_path('repoquery', required=False) + + return repoquerybin def po_to_nevra(po): @@ -186,8 +209,11 @@ def po_to_nevra(po): else: return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) -def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False): - +def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, is_pkg=False): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: pkgs = [] @@ -225,7 +251,11 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ return [] -def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -262,10 +292,13 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - return [] -def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -310,9 +343,13 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_rep else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - return [] + return set() -def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -358,7 +395,7 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], d else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - return [] + return set() def transaction_exists(pkglist): """ @@ -448,10 +485,10 @@ def repolist(module, repoq, qf="%{repoid}"): ret = set([ p for p in out.split('\n') if p.strip() ]) return ret -def list_stuff(module, conf_file, 
stuff): +def list_stuff(module, repoquerybin, conf_file, stuff): qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] + repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] if conf_file and os.path.exists(conf_file): repoq += ['-c', conf_file] @@ -468,11 +505,13 @@ def list_stuff(module, conf_file, stuff): def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): + pkgs = [] res = {} res['results'] = [] res['msg'] = '' res['rc'] = 0 res['changed'] = False + tempdir = tempfile.mkdtemp() for spec in items: pkg = None @@ -495,6 +534,23 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # URL elif '://' in spec: pkg = spec + # Check if Enterprise Linux 5 or less, as yum on those versions do not support installing via url + distribution_version = get_distribution_version() + distribution = platform.dist() + if distribution[0] == "redhat" and LooseVersion(distribution_version) < LooseVersion("6"): + package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, pkg) + f = open(package, 'w') + data = rsp.read(BUFSIZE) + while data: + f.write(data) + data = rsp.read(BUFSIZE) + f.close() + pkg = package + except Exception, e: + shutil.rmtree(tempdir) + module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) #groups :( elif spec.startswith('@'): @@ -507,9 +563,9 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # short circuit all the bs - and search for it as a pkg in is_installed # if you find it then we're done if not set(['*','?']).intersection(set(spec)): - pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - res['results'].append('%s providing %s is already installed' % (pkgs[0], spec)) + installed_pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) + if installed_pkgs: + res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec)) continue # look up what pkgs provide this @@ -553,20 +609,30 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # the error we're catching here pkg = spec - cmd = yum_basecmd + ['install', pkg] + pkgs.append(pkg) + + if pkgs: + cmd = yum_basecmd + ['install'] + pkgs if module.check_mode: + # Remove rpms downloaded for EL5 via url + try: + shutil.rmtree(tempdir) + except Exception, e: + module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True) changed = True rc, out, err = module.run_command(cmd) - # Fail on invalid urls: - if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) - elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: + if (rc == 1): + for spec in items: + # Fail on invalid urls: + if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): + err = 'Package at %s could not be installed' % spec + module.fail_json(changed=False,msg=err,rc=1) + if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: # avoid failing in the 'Nothing To Do' case # this may happen with an URL spec. 
# for an already installed group, @@ -576,22 +642,29 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): out = '%s: Nothing to do' % spec changed = False - res['rc'] += rc + res['rc'] = rc res['results'].append(out) res['msg'] += err # FIXME - if we did an install - go and check the rpmdb to see if it actually installed - # look for the pkg in rpmdb - # look for the pkg via obsoletes + # look for each pkg in rpmdb + # look for each pkg via obsoletes - # accumulate any changes - res['changed'] |= changed + # Record change + res['changed'] = changed - module.exit_json(**res) + # Remove rpms downloaded for EL5 via url + try: + shutil.rmtree(tempdir) + except Exception, e: + module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) + + return res def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): + pkgs = [] res = {} res['results'] = [] res['msg'] = '' @@ -608,17 +681,20 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['results'].append('%s is not installed' % pkg) continue + pkgs.append(pkg) + + if pkgs: # run an actual yum transaction - cmd = yum_basecmd + ["remove", pkg] + cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command(cmd) - res['rc'] += rc + res['rc'] = rc res['results'].append(out) - res['msg'] += err + res['msg'] = err # compile the results into one batch. If anything is changed # then mark changed @@ -627,17 +703,18 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # at this point we should check to see if the pkg is no longer present - if not is_group: # we can't sensibly check for a group being uninstalled reliably - # look to see if the pkg shows up from is_installed. If it doesn't - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['changed'] = True - else: - module.fail_json(**res) + for pkg in pkgs: + if not pkg.startswith('@'): # we can't sensibly check for a group being uninstalled reliably + # look to see if the pkg shows up from is_installed. If it doesn't + if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): + res['changed'] = True + else: + module.fail_json(**res) if rc != 0: module.fail_json(**res) - - module.exit_json(**res) + + return res def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): @@ -683,7 +760,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): nothing_to_do = False break - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): + if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break @@ -719,19 +796,15 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): else: res['changed'] = True - module.exit_json(**res) + return res def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, - disable_gpg_check, exclude): + disable_gpg_check, exclude, repoq): + yumbin = module.get_bin_path('yum') # need debug level 2 to get 'Nothing to do' for groupinstall. 
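The install()/remove() rework in the hunks above is easiest to read as collect first, run once: specs are accumulated into pkgs and yum is invoked a single time, so interdependent packages land in one transaction. A minimal sketch, assuming a run(cmd) helper that executes and returns an exit code:

    def install_all(run, is_installed, yum_basecmd, specs):
        # Sketch only: mirrors the batching above; group/url resolution elided.
        pkgs = [spec for spec in specs if not is_installed(spec)]
        if not pkgs:
            return 0  # nothing to add, so no transaction at all
        # one yum invocation == one transaction for all interdependent packages
        return run(yum_basecmd + ['install'] + pkgs)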
yum_basecmd = [yumbin, '-d', '2', '-y'] - if not repoquery: - repoq = None - else: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] - if conf_file and os.path.exists(conf_file): yum_basecmd += ['-c', conf_file] if repoq: @@ -751,7 +824,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, if exclude: e_cmd = ['--exclude=%s' % exclude] yum_basecmd.extend(e_cmd) - + if state in ['installed', 'present', 'latest']: if module.params.get('update_cache'): @@ -778,16 +851,19 @@ if state in ['installed', 'present']: if disable_gpg_check: yum_basecmd.append('--nogpgcheck') - install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state in ['removed', 'absent']: - remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state == 'latest': if disable_gpg_check: yum_basecmd.append('--nogpgcheck') - latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + else: + # should be caught by AnsibleModule argument_spec + module.fail_json(msg="we should never get here unless this all" + " failed", changed=False, results='', errors='unexpected state') - # should be caught by AnsibleModule argument_spec - return dict(changed=False, failed=True, results='', errors='unexpected state') + return res def main(): @@ -822,29 +898,45 @@ supports_check_mode = True ) - # this should not be needed, but exists as a failsafe params = module.params - if params['install_repoquery'] and not repoquery and not module.check_mode: - install_yum_utils(module) if params['list']: - if not repoquery: + repoquerybin = ensure_yum_utils(module) + if not repoquerybin: module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.") - results = dict(results=list_stuff(module, params['conf_file'], params['list'])) - module.exit_json(**results) + results = dict(results=list_stuff(module, repoquerybin, params['conf_file'], params['list'])) else: + # If rhn-plugin is installed and no rhn-certificate is available on + # the system then users will see an error message using the yum API. + # Use repoquery in those cases.
+ + my = yum_base(params['conf_file']) + # A sideeffect of accessing conf is that the configuration is + # loaded and plugins are discovered + my.conf + repoquery = None + if 'rhnplugin' in my.plugins._plugins: + repoquerybin = ensure_yum_utils(module) + if repoquerybin: + repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] + pkg = [ p.strip() for p in params['name']] exclude = params['exclude'] state = params['state'] enablerepo = params.get('enablerepo', '') disablerepo = params.get('disablerepo', '') disable_gpg_check = params['disable_gpg_check'] - res = ensure(module, state, pkg, params['conf_file'], enablerepo, - disablerepo, disable_gpg_check, exclude) - module.fail_json(msg="we should never get here unless this all failed", **res) + results = ensure(module, state, pkg, params['conf_file'], enablerepo, + disablerepo, disable_gpg_check, exclude, repoquery) + if repoquery: + results['msg'] = '%s %s' % (results.get('msg',''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.') + + module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.urls import * +if __name__ == '__main__': + main() diff --git a/source_control/git.py b/source_control/git.py index 0cb87304a92..bc35c97da93 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -21,7 +21,9 @@ DOCUMENTATION = ''' --- module: git -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" version_added: "0.0.1" short_description: Deploy software (or files) from git checkouts description: @@ -160,6 +162,19 @@ options: main project. This is equivalent to specifying the --remote flag to git submodule update. + verify_commit: + required: false + default: "no" + choices: ["yes", "no"] + version_added: "2.0" + description: + - if C(yes), when cloning or checking out a C(version) verify the + signature of a GPG signed commit. This requires C(git) version>=2.1.0 + to be installed. The commit MUST be signed and the public key MUST + be trusted in the GPG trustdb. + +requirements: + - git (the command line tool) notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. 
To avoid this prompt, @@ -298,7 +313,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, refspec): + reference, refspec, verify_commit): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -326,6 +341,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if refspec: module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + if verify_commit: + verify_commit_sign(git_path, module, dest, version) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -574,7 +592,7 @@ def submodule_update(git_path, module, dest, track_submodules): return (rc, out, err) -def switch_version(git_path, module, dest, remote, version): +def switch_version(git_path, module, dest, remote, version, verify_commit): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -599,8 +617,20 @@ def switch_version(git_path, module, dest, remote, version): module.fail_json(msg="Failed to checkout %s" % (version)) else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) + + if verify_commit: + verify_commit_sign(git_path, module, dest, version) + return (rc, out1, err1) + +def verify_commit_sign(git_path, module, dest, version): + cmd = "%s verify-commit %s" % (git_path, version) + (rc, out, err) = module.run_command(cmd, cwd=dest) + if rc != 0: + module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version) + return (rc, out, err) + # =========================================== def main(): @@ -616,6 +646,7 @@ def main(): depth=dict(default=None, type='int'), clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), + verify_commit=dict(default='no', type='bool'), accept_hostkey=dict(default='no', type='bool'), key_file=dict(default=None, required=False), ssh_opts=dict(default=None, required=False), @@ -637,6 +668,7 @@ def main(): update = module.params['update'] allow_clone = module.params['clone'] bare = module.params['bare'] + verify_commit = module.params['verify_commit'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) key_file = module.params['key_file'] @@ -689,7 +721,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -729,7 +761,7 @@ def main(): # switch to version specified regardless of whether # we got new revisions from the repository if not bare: - switch_version(git_path, module, dest, remote, version) + switch_version(git_path, module, dest, remote, version, verify_commit) # Deal with submodules submodules_updated = False diff --git a/source_control/hg.py b/source_control/hg.py index d83215fabe1..285bc6f1729 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -32,7 +32,7 @@ short_description: Manages Mercurial (hg) repositories. description: - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. 
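A hedged sketch of the verification step the git module gains above (git >= 2.1.0 ships verify-commit; the runner argument stands in for module.run_command):

    def commit_is_verified(run_command, git_path, dest, version):
        # Sketch only: non-zero rc means unsigned, or signed by an untrusted key.
        rc, out, err = run_command('%s verify-commit %s' % (git_path, version), cwd=dest)
        return rc == 0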
version_added: "1.0" -author: Yeukhon Wong +author: "Yeukhon Wong (@yeukhon)" options: repo: description: @@ -65,6 +65,13 @@ options: required: false default: "no" choices: [ "yes", "no" ] + update: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.0" + description: + - If C(no), do not retrieve new revisions from the origin repository executable: required: false default: null @@ -210,6 +217,7 @@ def main(): revision = dict(default=None, aliases=['version']), force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), + update = dict(default='yes', type='bool'), executable = dict(default=None), ), ) @@ -218,6 +226,7 @@ def main(): revision = module.params['revision'] force = module.params['force'] purge = module.params['purge'] + update = module.params['update'] hg_path = module.params['executable'] or module.get_bin_path('hg', True) hgrc = os.path.join(dest, '.hg/hgrc') @@ -234,6 +243,9 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() elif hg.at_revision: # no update needed, don't pull before = hg.get_revision() diff --git a/source_control/subversion.py b/source_control/subversion.py index f4a0f65fd78..cae4702e174 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -25,7 +25,7 @@ short_description: Deploys a subversion repository. description: - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout. version_added: "0.7" -author: Dane Summers, njharman@gmail.com +author: "Dane Summers (@dsummersl) " notes: - Requires I(svn) to be installed on the client. requirements: [] @@ -121,7 +121,7 @@ class Subversion(object): def checkout(self): '''Creates new svn working directory if it does not already exist.''' self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - + def export(self, force=False): '''Export svn repo to directory''' cmd = ["export"] @@ -153,11 +153,10 @@ class Subversion(object): def has_local_mods(self): '''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' - lines = self._exec(["status", self.dest]) - # Match only revisioned files, i.e. ignore status '?'. - regex = re.compile(r'^[^?]') + lines = self._exec(["status", "--quiet", self.dest]) + # The --quiet option will return only modified files. # Has local mods if more than 0 modifed revisioned files. - return len(filter(regex.match, lines)) > 0 + return len(filter(len, lines)) > 0 def needs_update(self): curr, url = self.get_revision() diff --git a/system/authorized_key.py b/system/authorized_key.py index 458b94dff04..5c12cfdde92 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -34,7 +34,6 @@ options: - The username on the remote host whose authorized_keys file will be modified required: true default: null - aliases: [] key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) @@ -72,16 +71,18 @@ options: version_added: "1.4" exclusive: description: - - Whether to remove all other non-specified keys from the - authorized_keys file. Multiple keys can be specified in a single - key= string value by separating them by newlines. + - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys + can be specified in a single C(key) string value by separating them by newlines. 
+ - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration + of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a + single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" -author: Brad Olson +author: "Brad Olson (@bradobro)" ''' EXAMPLES = ''' @@ -138,7 +139,7 @@ import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ - + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): @@ -146,7 +147,7 @@ class keydict(dict): self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) + super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): @@ -154,7 +155,7 @@ class keydict(dict): def values(self): return [self[key] for key in self] def itervalues(self): - return (self[key] for key in self) + return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ @@ -168,6 +169,13 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): :return: full path string to authorized_keys for user """ + if module.check_mode: + if path is None: + module.fail_json(msg="You must provide full path to key file in check mode") + else: + keysfile = path + return keysfile + try: user_entry = pwd.getpwnam(user) except KeyError, e: @@ -214,8 +222,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): return keysfile def parseoptions(module, options): - ''' - reads a string containing ssh-key options + ''' + reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict @@ -246,7 +254,7 @@ def parsekey(module, raw_key): 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', + 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] diff --git a/system/cron.py b/system/cron.py index f91587caf66..88985e23071 100644 --- a/system/cron.py +++ b/system/cron.py @@ -46,7 +46,7 @@ options: description: - Description of a crontab entry. default: null - required: true + required: false user: description: - The specific user whose crontab should be modified. @@ -71,9 +71,10 @@ options: backup: description: - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. + The location of the backup is returned in the C(backup_file) variable by this module. 
required: false - default: false + choices: [ "yes", "no" ] + default: no minute: description: - Minute when the job should run ( 0-59, *, */2, etc ) @@ -117,7 +118,7 @@ options: choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ] requirements: - cron -author: Dane Summers +author: "Dane Summers (@dsummersl)" updates: [ 'Mike Grozak', 'Patrick Callahan' ] """ @@ -397,7 +398,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name=dict(required=True), + name=dict(required=False), user=dict(required=False), job=dict(required=False), cron_file=dict(required=False), diff --git a/system/group.py b/system/group.py old mode 100755 new mode 100644 index 83ea410b0b1..ab542d9bc47 --- a/system/group.py +++ b/system/group.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: group -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" version_added: "0.0.2" short_description: Add or remove groups requirements: [ groupadd, groupdel, groupmod ] @@ -121,7 +121,7 @@ class Group(object): if len(cmd) == 1: return (None, '', '') if self.module.check_mode: - return (0, '', '') + return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) @@ -233,7 +233,8 @@ class FreeBsdGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] if self.gid is not None: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % int(self.gid)) return self.execute_command(cmd) def group_mod(self, **kwargs): @@ -241,7 +242,8 @@ class FreeBsdGroup(Group): info = self.group_info() cmd_len = len(cmd) if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % int(self.gid)) # modify the group if cmd will do anything if cmd_len != len(cmd): if self.module.check_mode: @@ -271,7 +273,8 @@ class DarwinGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'create' ] - cmd += [ '-i', self.gid ] + if self.gid is not None: + cmd += [ '-i', self.gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) @@ -283,12 +286,13 @@ class DarwinGroup(Group): (rc, out, err) = self.execute_command(cmd) return (rc, out, err) - def group_mod(self): + def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'edit' ] - cmd += [ '-i', self.gid ] + if gid is not None: + cmd += [ '-i', gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) diff --git a/system/hostname.py b/system/hostname.py index 307a8b687be..d9193641eb2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: hostname -author: Hiroaki Nakamura +author: "Hiroaki Nakamura (@hnakamur)" version_added: "1.4" short_description: Manage hostname requirements: [ hostname ] @@ -248,6 +248,8 @@ class SystemdStrategy(GenericStrategy): return out.strip() def set_current_hostname(self, name): + if len(name) > 64: + self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name") cmd = ['hostnamectl', '--transient', 'set-hostname', name] rc, out, err = self.module.run_command(cmd) if rc != 0: @@ -263,6 +265,8 @@ class SystemdStrategy(GenericStrategy): return out.strip() def set_permanent_hostname(self, name): + if len(name) > 64: + 
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name") cmd = ['hostnamectl', '--pretty', 'set-hostname', name] rc, out, err = self.module.run_command(cmd) if rc != 0: @@ -364,6 +368,15 @@ class FedoraHostname(Hostname): distribution = 'Fedora' strategy_class = SystemdStrategy +class SLESHostname(Hostname): + platform = 'Linux' + distribution = 'Suse linux enterprise server ' + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("12"): + strategy_class = SystemdStrategy + else: + strategy_class = UnimplementedStrategy + class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' @@ -496,6 +509,6 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name) + module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) main() diff --git a/system/mount.py b/system/mount.py index e11d497220b..ff7094dad3b 100644 --- a/system/mount.py +++ b/system/mount.py @@ -79,7 +79,9 @@ options: notes: [] requirements: [] -author: Seth Vidal +author: + - Ansible Core Team + - Seth Vidal ''' EXAMPLES = ''' # Mount DVD read-only @@ -102,7 +104,11 @@ def write_fstab(lines, dest): fs_w.flush() fs_w.close() -def set_mount(**kwargs): +def _escape_fstab(v): + """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ + return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') + +def set_mount(module, **kwargs): """ set/change a mount point location in fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -114,11 +120,17 @@ def set_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' to_write = [] exists = False changed = False + escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()]) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -135,16 +147,16 @@ def set_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_args['name']: to_write.append(line) continue # it exists - now see if what we have is different exists = True for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: + if ld[t] != escaped_args[t]: changed = True - ld[t] = args[t] + ld[t] = escaped_args[t] if changed: to_write.append(new_line % ld) @@ -155,13 +167,14 @@ def set_mount(**kwargs): to_write.append(new_line % args) changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # mount function needs origname + return (origname, changed) -def unset_mount(**kwargs): +def unset_mount(module, **kwargs): """ remove a mount point from fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -173,8 +186,14 @@ def unset_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', 
r'\040') + to_write = [] changed = False + escaped_name = _escape_fstab(args['name']) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -191,28 +210,45 @@ def unset_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_name: to_write.append(line) continue # if we got here we found a match - continue and mark changed changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # umount needs origname + return (origname, changed) def mount(module, **kwargs): """ mount up a path or remount if needed """ + + # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab + args = dict( + opts = 'default', + dump = '0', + passno = '0', + fstab = '/etc/fstab' + ) + args.update(kwargs) + mount_bin = module.get_bin_path('mount') name = kwargs['name'] + + cmd = [ mount_bin, ] + if os.path.ismount(name): - cmd = [ mount_bin , '-o', 'remount', name ] - else: - cmd = [ mount_bin, name ] + cmd += [ '-o', 'remount', ] + + if get_platform().lower() == 'freebsd': + cmd += [ '-F', args['fstab'], ] + + cmd += [ name, ] rc, out, err = module.run_command(cmd) if rc == 0: @@ -245,7 +281,8 @@ def main(): src = dict(required=True), fstype = dict(required=True), fstab = dict(default='/etc/fstab') - ) + ), + supports_check_mode=True ) @@ -260,8 +297,6 @@ def main(): args['passno'] = module.params['passno'] if module.params['opts'] is not None: args['opts'] = module.params['opts'] - if ' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") if module.params['dump'] is not None: args['dump'] = module.params['dump'] if module.params['fstab'] is not None: @@ -282,8 +317,8 @@ def main(): state = module.params['state'] name = module.params['name'] if state == 'absent': - name, changed = unset_mount(**args) - if changed: + name, changed = unset_mount(module, **args) + if changed and not module.check_mode: if os.path.ismount(name): res,msg = umount(module, **args) if res: @@ -299,26 +334,27 @@ def main(): if state == 'unmounted': if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) + if not module.check_mode: + res,msg = umount(module, **args) + if res: + module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) changed = True module.exit_json(changed=changed, **args) if state in ['mounted', 'present']: if state == 'mounted': - if not os.path.exists(name): + if not os.path.exists(name) and not module.check_mode: try: os.makedirs(name) except (OSError, IOError), e: module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - name, changed = set_mount(**args) + name, changed = set_mount(module, **args) if state == 'mounted': res = 0 if os.path.ismount(name): - if changed: + if changed and not module.check_mode: res,msg = mount(module, **args) elif 'bind' in args.get('opts', []): changed = True @@ -333,7 +369,9 @@ def main(): res,msg = mount(module, **args) else: changed = True - res,msg = mount(module, **args) + if not module.check_mode: + res,msg = mount(module, **args) + if res: module.fail_json(msg="Error mounting %s: %s" % (name, msg)) diff --git a/system/ping.py b/system/ping.py index b098d0054cd..bea7fb22f1d 100644 --- a/system/ping.py +++ b/system/ping.py @@ -29,7 +29,9 @@ description: contact. 
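The escaping introduced in the mount module above follows fstab's octal convention; a quick round trip, limited to the three characters the hunk handles:

    def escape_fstab(value):
        # space (040), ampersand (046) and backslash (134) are invalid in fstab fields
        return value.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046')

    assert escape_fstab('/mnt/my disk') == '/mnt/my\\040disk'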
It does not make sense in playbooks, but it is useful from C(/usr/bin/ansible) options: {} -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/system/seboolean.py b/system/seboolean.py index 9799e71636a..3a150d05a20 100644 --- a/system/seboolean.py +++ b/system/seboolean.py @@ -45,7 +45,7 @@ options: notes: - Not tested on any debian based system requirements: [ ] -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" ''' EXAMPLES = ''' diff --git a/system/selinux.py b/system/selinux.py index 7f88a4a47a8..2debb95a475 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -45,7 +45,7 @@ options: notes: - Not tested on any debian based system requirements: [ libselinux-python ] -author: Derek Carter +author: "Derek Carter (@goozbach) " ''' EXAMPLES = ''' diff --git a/system/service.py b/system/service.py index 42b9a7762f3..8caece20143 100644 --- a/system/service.py +++ b/system/service.py @@ -21,7 +21,9 @@ DOCUMENTATION = ''' --- module: service -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" version_added: "0.1" short_description: Manage services. description: @@ -72,6 +74,14 @@ options: description: - Additional arguments provided on the command line aliases: [ 'args' ] + must_exist: + required: false + default: true + version_added: "2.0" + description: + - Avoid a module failure if the named service does not exist. Useful + for opportunistically starting/stopping/restarting a list of + potential services. ''' EXAMPLES = ''' @@ -95,6 +105,9 @@ EXAMPLES = ''' # Example action to restart network service for interface eth0 - service: name=network state=restarted args=eth0 + +# Example action to restart nova-compute if it exists +- service: name=nova-compute state=restarted must_exist=no ''' import platform @@ -468,7 +481,11 @@ class LinuxService(Service): self.enable_cmd = location['chkconfig'] if self.enable_cmd is None: - self.module.fail_json(msg="no service or tool found for: %s" % self.name) + if self.module.params['must_exist']: + self.module.fail_json(msg="no service or tool found for: %s" % self.name) + else: + # exiting without change on non-existent service + self.module.exit_json(changed=False, exists=False) # If no service control tool selected yet, try to see if 'service' is available if self.svc_cmd is None and location.get('service', False): @@ -476,7 +493,11 @@ class LinuxService(Service): # couldn't find anything yet if self.svc_cmd is None and not self.svc_initscript: - self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') + if self.module.params['must_exist']: + self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') + else: + # exiting without change on non-existent service + self.module.exit_json(changed=False, exists=False) if location.get('initctl', False): self.svc_initctl = location['initctl'] @@ -765,6 +786,9 @@ class LinuxService(Service): else: action = 'disable' + if self.module.check_mode: + rc = 0 + return (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action)) if rc != 0: if err: @@ -861,7 +885,7 @@ class LinuxService(Service): if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: self.execute_command("%s zap" % svc_cmd, daemonize=True) - if self.action is not "restart": + if self.action != "restart": if svc_cmd != '': # upstart or systemd or 
OpenRC rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) @@ -964,16 +988,16 @@ class FreeBsdService(Service): try: return self.service_enable_rcconf() - except: + except Exception: self.module.fail_json(msg='unable to set rcvar') def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" - if self.action is "reload": + if self.action == "reload": self.action = "onereload" return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) @@ -1037,7 +1061,7 @@ class OpenBsdService(Service): getdef_string = stdout.rstrip() - # Depending on the service the string returned from 'default' may be + # Depending on the service the string returned from 'getdef' may be # either a set of flags or the boolean YES/NO if getdef_string == "YES" or getdef_string == "NO": default_flags = '' @@ -1051,7 +1075,7 @@ class OpenBsdService(Service): get_string = stdout.rstrip() - # Depending on the service the string returned from 'getdef/get' may be + # Depending on the service the string returned from 'get' may be # either a set of flags or the boolean YES/NO if get_string == "YES" or get_string == "NO": current_flags = '' @@ -1179,9 +1203,9 @@ class NetBsdService(Service): self.running = True def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" self.svc_cmd = "%s" % self.svc_initscript @@ -1397,6 +1421,7 @@ def main(): enabled = dict(type='bool'), runlevel = dict(required=False, default='default'), arguments = dict(aliases=['args'], default=''), + must_exist = dict(type='bool', default=True), ), supports_check_mode=True ) diff --git a/system/setup.py b/system/setup.py index 486304230bf..2fbe71e260a 100644 --- a/system/setup.py +++ b/system/setup.py @@ -57,7 +57,9 @@ notes: - If the target host is Windows, you will not currently have the ability to use C(fact_path) or C(filter) as this is provided by a simpler implementation of the module. Different facts are returned for Windows hosts. 
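Several of the service fixes above replace is with == when comparing against string literals; is tests object identity, which only happens to match when CPython interns the literal. A quick illustration:

    action = ''.join(['re', 'start'])  # equal to "restart", but a distinct object
    print(action == 'restart')         # True:  value equality, what the code wants
    print(action is 'restart')         # False: identity is not guaranteed for strings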
-author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = """ diff --git a/system/sysctl.py b/system/sysctl.py index 4517c724ca9..e48d5df74c5 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -71,7 +71,7 @@ options: default: False notes: [] requirements: [] -author: David "DaviXX" CHANIAL +author: "David CHANIAL (@davixx) " ''' EXAMPLES = ''' @@ -322,7 +322,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=['key'], required=True), - value = dict(aliases=['val'], required=False), + value = dict(aliases=['val'], required=False, type='str'), state = dict(default='present', choices=['present', 'absent']), reload = dict(default=True, type='bool'), sysctl_set = dict(default=False, type='bool'), diff --git a/system/user.py b/system/user.py old mode 100755 new mode 100644 index dd141565bde..1045df70e67 --- a/system/user.py +++ b/system/user.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: user -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" version_added: "0.2" short_description: Manage user accounts requirements: [ useradd, userdel, usermod ] @@ -253,7 +253,6 @@ class User(object): self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] - self.home = module.params['home'] self.shell = module.params['shell'] self.password = module.params['password'] self.force = module.params['force'] @@ -269,8 +268,12 @@ class User(object): self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] + self.home = None self.expires = None + if module.params['home'] is not None: + self.home = os.path.expanduser(module.params['home']) + if module.params['expires']: try: self.expires = time.gmtime(module.params['expires']) diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +rc=$? +if [ $rc -ne 0 ] ; then + exit $rc +fi +make -C docsite diff --git a/utilities/helper/accelerate.py b/utilities/helper/accelerate.py index 726195d72e4..8ae8ab263be 100644 --- a/utilities/helper/accelerate.py +++ b/utilities/helper/accelerate.py @@ -66,7 +66,7 @@ notes: requirements: - "python >= 2.6" - "python-keyczar" -author: James Cammarata +author: "James Cammarata (@jimi-c)" ''' EXAMPLES = ''' diff --git a/utilities/helper/fireball.py b/utilities/helper/fireball.py index 43760969a89..97b4acc85a0 100644 --- a/utilities/helper/fireball.py +++ b/utilities/helper/fireball.py @@ -45,7 +45,9 @@ options: notes: - See the advanced playbooks chapter for more about using fireball mode.
requirements: [ "zmq", "keyczar" ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/assert.py b/utilities/logic/assert.py index f5963d60cd7..e9e359f421a 100644 --- a/utilities/logic/assert.py +++ b/utilities/logic/assert.py @@ -31,7 +31,9 @@ options: - "A string expression of the same form that can be passed to the 'when' statement" - "Alternatively, a list of string expressions" required: true -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/async_status.py b/utilities/logic/async_status.py index f991b50064b..8b134c94a7b 100644 --- a/utilities/logic/async_status.py +++ b/utilities/logic/async_status.py @@ -43,7 +43,9 @@ options: notes: - See also U(http://docs.ansible.com/playbooks_async.html) requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' import datetime diff --git a/utilities/logic/debug.py b/utilities/logic/debug.py index 2df68ca0830..5142709dbe0 100644 --- a/utilities/logic/debug.py +++ b/utilities/logic/debug.py @@ -38,7 +38,9 @@ options: var: description: - A variable name to debug. Mutually exclusive with the 'msg' option. -author: Dag Wieers, Michael DeHaan +author: + - "Dag Wieers (@dagwieers)" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/fail.py b/utilities/logic/fail.py index 23f5b83668c..75a7c81d1cf 100644 --- a/utilities/logic/fail.py +++ b/utilities/logic/fail.py @@ -34,7 +34,7 @@ options: required: false default: "'Failed as requested from task'" -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" ''' EXAMPLES = ''' diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py index 4c7c39d9035..a6b2b5b152f 100644 --- a/utilities/logic/include_vars.py +++ b/utilities/logic/include_vars.py @@ -10,7 +10,7 @@ DOCUMENTATION = ''' --- -author: Benno Joy +author: "Benno Joy (@bennojoy)" module: include_vars short_description: Load variables from files, dynamically within a task. description: diff --git a/utilities/logic/pause.py b/utilities/logic/pause.py index 6e8a83afe61..f1d10bf017f 100644 --- a/utilities/logic/pause.py +++ b/utilities/logic/pause.py @@ -25,7 +25,7 @@ options: - Optional text to use for the prompt message. required: false default: null -author: Tim Bielawa +author: "Tim Bielawa (@tbielawa)" ''' EXAMPLES = ''' diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py index f9124ab0ea5..f05dbf76795 100644 --- a/utilities/logic/set_fact.py +++ b/utilities/logic/set_fact.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" module: set_fact short_description: Set host facts from a task description: diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 1f549570516..95653b56d3e 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -101,7 +101,10 @@ options: notes: - The ability to use search_regex with a port connection was added in 1.7. 
diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py
index 817e782aa76..cb43ba9b0eb 100644
--- a/web_infrastructure/apache2_module.py
+++ b/web_infrastructure/apache2_module.py
@@ -20,6 +20,7 @@ DOCUMENTATION = '''
 ---
 module: apache2_module
 version_added: 1.6
+author: "Christian Berendt (@berendt)"
 short_description: enables/disables a module of the Apache2 webserver
 description:
    - Enables or disables a specified module of the Apache2 webserver.
@@ -34,6 +35,7 @@ options:
         choices: ['present', 'absent']
         default: present

+requirements: ["a2enmod","a2dismod"]
 '''

 EXAMPLES = '''
diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py
index 46ebb2fb8f1..b3cabfe01b5 100644
--- a/web_infrastructure/django_manage.py
+++ b/web_infrastructure/django_manage.py
@@ -30,7 +30,8 @@ options:
   command:
     choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
     description:
-      - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django.
+      - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
+      - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
     required: true
   app_path:
     description:
@@ -89,10 +90,10 @@ notes:
    - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
    - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
    - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
-   - To be able to use the migrate command, you must have south installed and added as an app in your settings
+   - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings
    - To be able to use the collectstatic command, you must have enabled staticfiles in your settings
 requirements: [ "virtualenv", "django" ]
-author: Scott Anderson
+author: "Scott Anderson (@tastychutney)"
 '''

 EXAMPLES = """
@@ -102,7 +103,7 @@ EXAMPLES = """
 # Load the initial_data fixture into the application
 - django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}

-#Run syncdb on the application
+# Run syncdb on the application
 - django_manage: >
       command=syncdb
       app_path={{ django_dir }}
@@ -110,8 +111,11 @@ EXAMPLES = """
       pythonpath={{ settings_dir }}
       virtualenv={{ virtualenv_dir }}

-#Run the SmokeTest test case from the main app. Useful for testing deploys.
-- django_manage: command=test app_path=django_dir apps=main.SmokeTest
+# Run the SmokeTest test case from the main app. Useful for testing deploys.
+- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
+
+# Create an initial superuser.
+- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
 """

@@ -159,7 +163,10 @@ def syncdb_filter_output(line):
     return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)

 def migrate_filter_output(line):
-    return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line)
+    return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
+
+def collectstatic_filter_output(line):
+    return "0 static files" not in line

 def main():
     command_allowed_param_map = dict(
@@ -218,7 +225,7 @@ def main():
     )

     command = module.params['command']
-    app_path = module.params['app_path']
+    app_path = os.path.expanduser(module.params['app_path'])
     virtualenv = module.params['virtualenv']

     for param in specific_params:
@@ -234,7 +241,7 @@ def main():

     _ensure_virtualenv(module)

-    cmd = "python manage.py %s" % (command, )
+    cmd = "./manage.py %s" % (command, )

     if command in noinput_commands:
         cmd = '%s --noinput' % cmd
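The new `collectstatic_filter_output` helper and the extended `migrate_filter_output` follow the module's changed-detection idiom: a command is reported as changed only if some line of its output passes the matching filter. A condensed sketch of that idiom (filter names as in the diff; the wiring is simplified):

    def migrate_filter_output(line):
        return ("Migrating forwards " in line) or ("Applying" in line)

    def changed_from_output(out, line_filter):
        # The module marks the task changed when any output line passes the filter.
        return any(line_filter(line) for line in out.split('\n'))

    print(changed_from_output("Applying app.0002_auto... OK", migrate_filter_output))  # True
    print(changed_from_output("No migrations to apply.", migrate_filter_output))      # False

The "Applying" clause covers Django 1.7+ native migrations, which print "Applying <migration>... OK" rather than South's "Migrating forwards" text.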
diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py
index 4a72ea37fec..361a131ef2d 100644
--- a/web_infrastructure/htpasswd.py
+++ b/web_infrastructure/htpasswd.py
@@ -46,7 +46,10 @@ options:
     choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
     default: "apr_md5_crypt"
     description:
-      - Encryption scheme to be used.
+      - Encryption scheme to be used. As well as the four choices listed
+        here, you can also use any other hash supported by passlib, such as
+        md5_crypt and sha256_crypt, which are linux passwd hashes. If you
+        do so the password file will not be compatible with Apache or Nginx.
   state:
     required: false
     choices: [ present, absent ]
@@ -66,7 +69,7 @@ notes:
   - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
   - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
 requires: [ passlib>=1.6 ]
-author: Lorin Hochstein
+author: "Lorin Hochstein (@lorin)"
 """

 EXAMPLES = """
@@ -74,20 +77,25 @@ EXAMPLES = """
 - htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
 # Remove a user from a password file
 - htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
+# Add a user to a password file suitable for use by libpam-pwdfile
+- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt
 """


 import os
+import tempfile
 from distutils.version import StrictVersion

 try:
-    from passlib.apache import HtpasswdFile
+    from passlib.apache import HtpasswdFile, htpasswd_context
+    from passlib.context import CryptContext
     import passlib
 except ImportError:
     passlib_installed = False
 else:
     passlib_installed = True

+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

 def create_missing_directories(dest):
     destpath = os.path.dirname(dest)
@@ -99,6 +107,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
     """ Ensures user is present

     Returns (msg, changed) """
+    if crypt_scheme in apache_hashes:
+        context = htpasswd_context
+    else:
+        context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
     if not os.path.exists(dest):
         if not create:
             raise ValueError('Destination %s does not exist' % dest)
@@ -106,9 +118,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
             return ("Create %s" % dest, True)
         create_missing_directories(dest)
         if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
-            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme)
+            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
         else:
-            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme)
+            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
         if getattr(ht, 'set_password', None):
             ht.set_password(username, password)
         else:
@@ -117,9 +129,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
         return ("Created %s and added %s" % (dest, username), True)
     else:
         if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
-            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme)
+            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
         else:
-            ht = HtpasswdFile(dest, default=crypt_scheme)
+            ht = HtpasswdFile(dest, default=crypt_scheme, context=context)

         found = None
         if getattr(ht, 'check_password', None):
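The CryptContext branch above is what lets htpasswd accept non-Apache passlib schemes such as md5_crypt. A standalone sketch of the same passlib pattern (assumes passlib is installed; the hash value is illustrative):

    from passlib.context import CryptContext

    apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

    # Requested scheme goes first so it becomes the default; the Apache schemes
    # stay registered so existing entries in the file can still be verified.
    context = CryptContext(schemes=["md5_crypt"] + apache_hashes)

    h = context.encrypt("oedu2eGh")       # passlib 1.6 spelling; .hash() in 1.7+
    print(context.verify("oedu2eGh", h))  # True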
@@ -178,7 +190,7 @@ def main():
         path=dict(required=True, aliases=["dest", "destfile"]),
         name=dict(required=True, aliases=["username"]),
         password=dict(required=False, default=None),
-        crypt_scheme=dict(required=False, default=None),
+        crypt_scheme=dict(required=False, default="apr_md5_crypt"),
         state=dict(required=False, default="present"),
         create=dict(type='bool', default='yes'),

@@ -198,6 +210,37 @@ def main():
     if not passlib_installed:
         module.fail_json(msg="This module requires the passlib Python library")

+    # Check file for blank lines in an effort to avoid the "need more than 1 value to unpack" error.
+    try:
+        f = open(path, "r")
+    except IOError:
+        # No preexisting file to remove blank lines from
+        f = None
+        lines = []
+    else:
+        try:
+            lines = f.readlines()
+        finally:
+            f.close()
+
+    # Rewriting the file would mark the task as changed, so only edit it when blank lines are actually present
+    strip = False
+    for line in lines:
+        if not line.strip():
+            strip = True
+            break
+
+    if strip:
+        # If check mode, create a temporary file
+        if check_mode:
+            temp = tempfile.NamedTemporaryFile()
+            path = temp.name
+        f = open(path, "w")
+        try:
+            [ f.write(line) for line in lines if line.strip() ]
+        finally:
+            f.close()
+
     try:
         if state == 'present':
             (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
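For context on the blank-line guard above (note the added `lines = []` initialisation, without which a missing file would raise a NameError in the loop below): an htpasswd entry is split on ":" into user and hash fields, and a blank line yields a single empty field, which is what produces the "need more than 1 value to unpack" ValueError the comment mentions. Illustration:

    # A normal line splits into two fields; a blank line yields only one.
    print("alex:$apr1$placeholder".split(":", 1))   # ['alex', '$apr1$placeholder']
    try:
        user, pwhash = "".split(":", 1)
    except ValueError as e:
        print(e)   # need more than 1 value to unpack (Python 2 wording)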
diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py
index f75992b9a6a..43fa95467fb 100644
--- a/web_infrastructure/supervisorctl.py
+++ b/web_infrastructure/supervisorctl.py
@@ -30,7 +30,7 @@ version_added: "0.7"
 options:
   name:
     description:
-      - The name of the supervisord program or group to manage.
+      - The name of the supervisord program or group to manage.
       - The name will be taken as group name when it ends with a colon I(:)
       - Group support is only available in Ansible version 1.6 or later.
     required: true
@@ -64,7 +64,7 @@ options:
       - The desired state of program/group.
     required: true
     default: null
-    choices: [ "present", "started", "stopped", "restarted" ]
+    choices: [ "present", "started", "stopped", "restarted", "absent" ]
   supervisorctl_path:
     description:
       - path to supervisorctl executable
@@ -75,7 +75,9 @@ notes:
   - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
   - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
 requirements: [ "supervisorctl" ]
-author: Matt Wright, Aaron Wang
+author:
+    - "Matt Wright (@mattupstate)"
+    - "Aaron Wang (@inetfuture)"
 '''

 EXAMPLES = '''
@@ -101,7 +103,7 @@ def main():
         username=dict(required=False),
         password=dict(required=False),
         supervisorctl_path=dict(required=False),
-        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped'])
+        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
     )

     module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
@@ -183,18 +185,36 @@ def main():
         if module.check_mode:
             module.exit_json(changed=True)
         for process_name in to_take_action_on:
-            rc, out, err = run_supervisorctl(action, process_name)
+            rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
             if '%s: %s' % (process_name, expected_result) not in out:
                 module.fail_json(msg=out)

         module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)

     if state == 'restarted':
-        rc, out, err = run_supervisorctl('update')
+        rc, out, err = run_supervisorctl('update', check_rc=True)
         processes = get_matched_processes()
+        if not processes:
+            module.fail_json(name=name, msg="ERROR (no such process)")
+
         take_action_on_processes(processes, lambda s: True, 'restart', 'started')

     processes = get_matched_processes()
+    if not processes:
+        module.fail_json(name=name, msg="ERROR (no such process)")
+
+    if state == 'absent':
+        if len(processes) == 0:
+            module.exit_json(changed=False, name=name, state=state)
+
+        if module.check_mode:
+            module.exit_json(changed=True)
+        run_supervisorctl('reread', check_rc=True)
+        rc, out, err = run_supervisorctl('remove', name)
+        if '%s: removed process group' % name in out:
+            module.exit_json(changed=True, name=name, state=state)
+        else:
+            module.fail_json(msg=out, name=name, state=state)

     if state == 'present':
         if len(processes) > 0:
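The new absent branch mirrors supervisorctl's own workflow: reread the configuration, then remove the process group, and treat the "removed process group" acknowledgement as success. Sketched with a hypothetical run() helper standing in for the module's run_supervisorctl (Python 2 era, as in this repo):

    import subprocess

    def run(*args):
        # Hypothetical stand-in for the module's run_supervisorctl helper.
        p = subprocess.Popen(('supervisorctl',) + args,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        return p.returncode, out, err

    def ensure_absent(name):
        run('reread')
        rc, out, err = run('remove', name)
        # Success is detected from supervisorctl's acknowledgement text.
        return ('%s: removed process group' % name) in out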
requirements: [ "supervisorctl" ] -author: Matt Wright, Aaron Wang +author: + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " ''' EXAMPLES = ''' @@ -101,7 +103,7 @@ def main(): username=dict(required=False), password=dict(required=False), supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent']) ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) @@ -183,18 +185,36 @@ def main(): if module.check_mode: module.exit_json(changed=True) for process_name in to_take_action_on: - rc, out, err = run_supervisorctl(action, process_name) + rc, out, err = run_supervisorctl(action, process_name, check_rc=True) if '%s: %s' % (process_name, expected_result) not in out: module.fail_json(msg=out) module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) if state == 'restarted': - rc, out, err = run_supervisorctl('update') + rc, out, err = run_supervisorctl('update', check_rc=True) processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: True, 'restart', 'started') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") + + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) if state == 'present': if len(processes) > 0: diff --git a/windows/setup.ps1 b/windows/setup.ps1 index c249251d974..32b4d865263 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -25,6 +25,7 @@ $result = New-Object psobject @{ changed = $false }; +$win32_os = Get-WmiObject Win32_OperatingSystem $osversion = [Environment]::OSVersion $memory = @() $memory += Get-WmiObject win32_Physicalmemory @@ -53,10 +54,13 @@ foreach ($adapter in $ActiveNetcfg) Set-Attr $result.ansible_facts "ansible_interfaces" $formattednetcfg +Set-Attr $result.ansible_facts "ansible_architecture" $win32_os.OSArchitecture + Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() Set-Attr $result.ansible_facts "ansible_os_family" "Windows" +Set-Attr $result.ansible_facts "ansible_os_name" $win32_os.Name.Split('|')[0] Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 new file mode 100644 index 00000000000..4a83e091c56 --- /dev/null +++ b/windows/win_copy.ps1 @@ -0,0 +1,104 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1
new file mode 100644
index 00000000000..4a83e091c56
--- /dev/null
+++ b/windows/win_copy.ps1
@@ -0,0 +1,104 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args
+
+$src = Get-Attr $params "src" $FALSE
+If ($src -eq $FALSE)
+{
+    Fail-Json (New-Object psobject) "missing required argument: src"
+}
+
+$dest = Get-Attr $params "dest" $FALSE
+If ($dest -eq $FALSE)
+{
+    Fail-Json (New-Object psobject) "missing required argument: dest"
+}
+
+$original_basename = Get-Attr $params "original_basename" $FALSE
+If ($original_basename -eq $FALSE)
+{
+    Fail-Json (New-Object psobject) "missing required argument: original_basename"
+}
+
+$result = New-Object psobject @{
+    changed = $FALSE
+    original_basename = $original_basename
+}
+
+# original_basename gets set if src and dest are dirs
+# but includes subdir if the source folder contains sub folders
+# e.g. you could get subdir/foo.txt
+
+# detect if doing recursive folder copy and create any non-existent destination sub folder
+$parent = Split-Path -Path $original_basename -Parent
+if ($parent.length -gt 0)
+{
+    $dest_folder = Join-Path $dest $parent
+    New-Item -Force $dest_folder -Type directory
+}
+
+# if $dest is a dir, append $original_basename so the file gets copied with its intended name.
+if (Test-Path $dest -PathType Container)
+{
+    $dest = Join-Path $dest $original_basename
+}
+
+$dest_checksum = Get-FileChecksum ($dest)
+$src_checksum = Get-FileChecksum ($src)
+
+If ($src_checksum.Equals($dest_checksum))
+{
+    # if both are "3" then both are folders, ok to copy
+    If ($src_checksum.Equals("3"))
+    {
+        # New-Item -Force creates subdirs for recursive copies
+        New-Item -Force $dest -Type file
+        Copy-Item -Path $src -Destination $dest -Force
+        $result.operation = "folder_copy"
+    }
+
+}
+ElseIf (! $src_checksum.Equals($dest_checksum))
+{
+    If ($src_checksum.Equals("3"))
+    {
+        Fail-Json (New-Object psobject) "If src is a folder, dest must also be a folder"
+    }
+    # The checksums don't match, there's something to do
+    Copy-Item -Path $src -Destination $dest -Force
+    $result.operation = "file_copy"
+}
+
+# verify the copy succeeded: after copying, the source and destination checksums should match
+$dest_checksum = Get-FileChecksum ($dest)
+If ( $src_checksum.Equals($dest_checksum))
+{
+    $result.changed = $TRUE
+}
+Else
+{
+    Fail-Json (New-Object psobject) "src checksum $src_checksum did not match dest_checksum $dest_checksum Failed to place file $original_basename in $dest"
+}
+# generate return values
+
+$info = Get-Item $dest
+$result.size = $info.Length
+
+Exit-Json $result
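The core idea of win_copy.ps1 above — hash source and destination, copy only on mismatch, then re-hash to verify — is language-independent. The same logic in Python (a sketch; Get-FileChecksum's folder sentinel value "3" is omitted):

    import hashlib
    import shutil

    def file_sha1(path):
        h = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                h.update(chunk)
        return h.hexdigest()

    def copy_if_changed(src, dest):
        try:
            unchanged = file_sha1(src) == file_sha1(dest)
        except IOError:        # dest missing: definitely changed
            unchanged = False
        if unchanged:
            return False       # nothing to do
        shutil.copyfile(src, dest)
        # Verify the copy landed intact, as the PowerShell above does.
        assert file_sha1(src) == file_sha1(dest)
        return True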
diff --git a/windows/win_copy.py b/windows/win_copy.py
new file mode 100644
index 00000000000..acc6c9ef2e0
--- /dev/null
+++ b/windows/win_copy.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Jon Hawkesworth (@jhawkesworth)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_copy
+version_added: "1.9.2"
+short_description: Copies files to remote locations on windows hosts.
+description:
+     - The M(win_copy) module copies a file on the local box to remote windows locations.
+options:
+  src:
+    description:
+      - Local path to a file to copy to the remote server; can be absolute or relative.
+        If path is a directory, it is copied recursively. In this case, if path ends
+        with "/", only the contents of that directory are copied to the destination.
+        Otherwise, if it does not end with "/", the directory itself with all contents
+        is copied. This behavior is similar to Rsync.
+    required: false
+    default: null
+    aliases: []
+  dest:
+    description:
+      - Remote absolute path where the file should be copied to. If src is a directory,
+        this must be a directory too. Use \\ for path separators.
+    required: true
+    default: null
+author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+   - The "win_copy" module is best used for small files only.
+     This module should **not** be used for files bigger than 3MB as
+     this will result in a 500 response from the winrm host
+     and it will not be possible to connect via winrm again until the
+     windows remote management service has been restarted on the
+     windows host.
+     Files larger than 1MB will take minutes to transfer.
+     The recommended way to transfer large files is using win_get_url
+     or collecting from a windows file share folder.
+'''
+
+EXAMPLES = '''
+# Copy a single file
+- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf
+
+# Copy the contents of files/temp_files dir into c:\temp\. Includes any sub dirs under files/temp_files
+# Note the use of unix style path in the dest.
+# This is necessary because \ is a yaml escape sequence
+- win_copy: src=files/temp_files/ dest=c:/temp/
+
+# Copy the files/temp_files dir and any files or sub dirs into c:\temp
+# Copies the folder because there is no trailing / on 'files/temp_files'
+- win_copy: src=files/temp_files dest=c:/temp/
+
+'''
+RETURN = '''
+dest:
+    description: destination file/path
+    returned: changed
+    type: string
+    sample: "c:/temp/"
+src:
+    description: source file used for the copy on the target machine
+    returned: changed
+    type: string
+    sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source"
+checksum:
+    description: checksum of the file after running copy
+    returned: success
+    type: string
+    sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827"
+size:
+    description: size of the target, after execution
+    returned: changed (single files only)
+    type: int
+    sample: 1220
+operation:
+    description: whether a single file copy took place or a folder copy
+    returned: changed (single files only)
+    type: string
+    sample: "file_copy"
+original_basename:
+    description: basename of the copied file
+    returned: changed (single files only)
+    type: string
+    sample: "foo.txt"
+'''
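The rsync-style trailing-slash rule in the src description can be made concrete: a trailing "/" means "copy the contents of the directory", no trailing slash means "copy the directory itself". A sketch of how that decision reads in Python (paths illustrative):

    import os

    def copy_plan(src, dest):
        # Trailing slash: the contents of src land directly in dest.
        if src.endswith('/'):
            return dest
        # No trailing slash: the directory itself is recreated under dest.
        return os.path.join(dest, os.path.basename(src.rstrip('/')))

    print(copy_plan('files/temp_files/', 'c:/temp/'))  # c:/temp/
    print(copy_plan('files/temp_files', 'c:/temp/'))   # c:/temp/temp_files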
diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1
index a54007b47bf..458d942e328 100644
--- a/windows/win_feature.ps1
+++ b/windows/win_feature.ps1
@@ -28,7 +28,7 @@ $result = New-Object PSObject -Property @{
 }

 If ($params.name) {
-    $name = $params.name
+    $name = $params.name -split ',' | % { $_.Trim() }
 }
 Else {
     Fail-Json $result "mising required argument: name"
diff --git a/windows/win_feature.py b/windows/win_feature.py
index ef344ee3b22..2d7a747cea0 100644
--- a/windows/win_feature.py
+++ b/windows/win_feature.py
@@ -68,7 +68,9 @@ options:
       - no
     default: null
     aliases: []
-author: Paul Durivage / Trond Hindenes
+author:
+    - "Paul Durivage (@angstwad)"
+    - "Trond Hindenes (@trondhindenes)"
 '''

 EXAMPLES = '''
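The win_feature.ps1 change (`-split ',' | % { $_.Trim() }`) turns a comma-separated name parameter into a list of trimmed feature names, so several features can be installed in one task. The equivalent transformation in Python (feature names are illustrative):

    # "Web-Server, Web-Common-Http" -> ["Web-Server", "Web-Common-Http"]
    def split_names(raw):
        return [part.strip() for part in raw.split(',')]

    print(split_names("Web-Server, Web-Common-Http"))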
diff --git a/windows/win_file.ps1 b/windows/win_file.ps1
new file mode 100644
index 00000000000..f8416120abf
--- /dev/null
+++ b/windows/win_file.ps1
@@ -0,0 +1,116 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args
+
+# path
+$path = Get-Attr $params "path" $FALSE
+If ($path -eq $FALSE)
+{
+    $path = Get-Attr $params "dest" $FALSE
+    If ($path -eq $FALSE)
+    {
+        $path = Get-Attr $params "name" $FALSE
+        If ($path -eq $FALSE)
+        {
+            Fail-Json (New-Object psobject) "missing required argument: path"
+        }
+    }
+}
+
+# JH Following advice from Chris Church, only allow the following states
+# in the windows version for now:
+# state - file, directory, touch, absent
+# (originally was: state - file, link, directory, hard, touch, absent)
+
+$state = Get-Attr $params "state" "unspecified"
+# if state is not supplied, test the $path to see if it looks like
+# a file or a folder and set state to file or folder
+
+# result
+$result = New-Object psobject @{
+    changed = $FALSE
+}
+
+If ( $state -eq "touch" )
+{
+    If(Test-Path $path)
+    {
+        (Get-ChildItem $path).LastWriteTime = Get-Date
+    }
+    Else
+    {
+        echo $null > $path
+    }
+    $result.changed = $TRUE
+}
+
+If (Test-Path $path)
+{
+    $fileinfo = Get-Item $path
+    If ( $state -eq "absent" )
+    {
+        Remove-Item -Recurse -Force $fileinfo
+        $result.changed = $TRUE
+    }
+    Else
+    {
+        # Only files have the .Directory attribute.
+        If ( $state -eq "directory" -and $fileinfo.Directory )
+        {
+            Fail-Json (New-Object psobject) "path is not a directory"
+        }
+
+        # Only files have the .Directory attribute.
+        If ( $state -eq "file" -and -not $fileinfo.Directory )
+        {
+            Fail-Json (New-Object psobject) "path is not a file"
+        }
+
+    }
+}
+Else
+# doesn't yet exist
+{
+    If ( $state -eq "unspecified" )
+    {
+        $basename = Split-Path -Path $path -Leaf
+        If ($basename.length -gt 0)
+        {
+            $state = "file"
+        }
+        Else
+        {
+            $state = "directory"
+        }
+    }
+
+    If ( $state -eq "directory" )
+    {
+        New-Item -ItemType directory -Path $path
+        $result.changed = $TRUE
+    }
+
+    If ( $state -eq "file" )
+    {
+        Fail-Json (New-Object psobject) "path will not be created"
+    }
+}
+
+Exit-Json $result
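When no state is supplied, win_file.ps1 infers one from the path: a non-empty leaf (basename) is treated as a file, while a path ending in a separator is treated as a directory. The same inference in Python, using ntpath so Windows separators behave correctly on any platform:

    import ntpath

    def infer_state(path):
        # Mirrors the Split-Path -Leaf test above: empty basename => directory.
        return "file" if ntpath.basename(path) else "directory"

    print(infer_state("C:\\temp\\foo.conf"))  # file
    print(infer_state("C:\\temp\\folder\\"))  # directory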
diff --git a/windows/win_file.py b/windows/win_file.py
new file mode 100644
index 00000000000..062b4bfe92e
--- /dev/null
+++ b/windows/win_file.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Jon Hawkesworth (@jhawkesworth)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: win_file
+version_added: "1.8"
+short_description: Creates, touches or removes files or directories.
+description:
+  - Creates (empty) files, updates file modification stamps of existing files,
+    and can create or remove directories.
+    Unlike M(file), does not modify ownership, permissions or manipulate links.
+notes:
+  - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
+requirements: [ ]
+author: "Jon Hawkesworth (@jhawkesworth)"
+options:
+  path:
+    description:
+      - 'path to the file being managed. Aliases: I(dest), I(name)'
+    required: true
+    default: []
+    aliases: ['dest', 'name']
+  state:
+    description:
+      - If C(directory), all immediate subdirectories will be created if they
+        do not exist.
+        If C(file), the file will NOT be created if it does not exist, see the M(copy)
+        or M(template) module if you want that behavior. If C(absent),
+        directories will be recursively deleted, and files will be removed.
+        If C(touch), an empty file will be created if the C(path) does not
+        exist, while an existing file or directory will receive updated file access and
+        modification times (similar to the way `touch` works from the command line).
+    required: false
+    default: file
+    choices: [ file, directory, touch, absent ]
+'''
+
+EXAMPLES = '''
+# create a file
+- win_file: path=C:\\temp\\foo.conf
+
+# touch a file (creates if not present, updates modification time if present)
+- win_file: path=C:\\temp\\foo.conf state=touch
+
+# remove a file, if present
+- win_file: path=C:\\temp\\foo.conf state=absent
+
+# create directory structure
+- win_file: path=C:\\temp\\folder\\subfolder state=directory
+
+# remove directory structure
+- win_file: path=C:\\temp state=absent
+'''
diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1
index b555cc7a52c..46979c129f2 100644
--- a/windows/win_get_url.ps1
+++ b/windows/win_get_url.ps1
@@ -1,7 +1,7 @@
 #!powershell
 # This file is part of Ansible.
 #
-# Copyright 2014, Paul Durivage
+# (c) 2015, Paul Durivage, Tal Auslander
 #
 # Ansible is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -40,14 +40,40 @@ Else {
     Fail-Json $result "missing required argument: dest"
 }

-$client = New-Object System.Net.WebClient
+$force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool

-Try {
-    $client.DownloadFile($url, $dest)
-    $result.changed = $true
+If ($force -or -not (Test-Path $dest)) {
+    $client = New-Object System.Net.WebClient
+
+    Try {
+        $client.DownloadFile($url, $dest)
+        $result.changed = $true
+    }
+    Catch {
+        Fail-Json $result "Error downloading $url to $dest"
+    }
 }
-Catch {
-    Fail-Json $result "Error downloading $url to $dest"
+Else {
+    Try {
+        $webRequest = [System.Net.HttpWebRequest]::Create($url)
+        $webRequest.IfModifiedSince = ([System.IO.FileInfo]$dest).LastWriteTime
+        $webRequest.Method = "GET"
+        [System.Net.HttpWebResponse]$webResponse = $webRequest.GetResponse()
+
+        $stream = New-Object System.IO.StreamReader($webResponse.GetResponseStream())
+
+        $stream.ReadToEnd() | Set-Content -Path $dest -Force -ErrorAction Stop
+
+        $result.changed = $true
+    }
+    Catch [System.Net.WebException] {
+        If ($_.Exception.Response.StatusCode -ne [System.Net.HttpStatusCode]::NotModified) {
+            Fail-Json $result "Error downloading $url to $dest"
+        }
+    }
+    Catch {
+        Fail-Json $result "Error downloading $url to $dest"
+    }
 }

 Set-Attr $result.win_get_url "url" $url
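The force=no path above (note the StreamReader now reads from $webResponse; the original draft referenced an undefined $response) is a standard HTTP conditional GET: send If-Modified-Since with the local file's mtime and treat 304 Not Modified as "no change". The same exchange in Python, using urllib2 to match the Python 2 era of this repo (URL handling is a sketch):

    import os
    import time
    import urllib2

    def download_if_modified(url, dest):
        req = urllib2.Request(url)
        if os.path.exists(dest):
            mtime = time.gmtime(os.path.getmtime(dest))
            req.add_header('If-Modified-Since',
                           time.strftime('%a, %d %b %Y %H:%M:%S GMT', mtime))
        try:
            resp = urllib2.urlopen(req)
        except urllib2.HTTPError as e:
            if e.code == 304:      # Not Modified: keep the local copy
                return False
            raise
        with open(dest, 'wb') as f:
            f.write(resp.read())
        return True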
diff --git a/windows/win_get_url.py b/windows/win_get_url.py
index 10910cf605e..a34f23890b5 100644
--- a/windows/win_get_url.py
+++ b/windows/win_get_url.py
@@ -27,21 +27,29 @@ module: win_get_url
 version_added: "1.7"
 short_description: Fetches a file from a given URL
 description:
- - Fetches a file from a URL and saves to locally
+ - Fetches a file from a URL and saves it locally
 options:
   url:
     description:
       - The full URL of a file to download
     required: true
     default: null
-    aliases: []
   dest:
     description:
-      - The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate.
+      - The absolute path of the location to save the file at the URL. Be sure
+        to include a filename and extension as appropriate.
+    required: true
+    default: null
+  force:
+    description:
+      - If C(yes), will always download the file. If C(no), will only
+        download the file if it does not exist or the remote file has been
+        modified more recently than the local file.
+    version_added: "2.0"
     required: false
+    choices: [ "yes", "no" ]
     default: yes
-    aliases: []
-author: Paul Durivage
+author: "Paul Durivage (@angstwad)"
 '''

 EXAMPLES = '''
@@ -54,4 +62,10 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest=C:\Users\RandomUser\earthrise.jpg"

 win_get_url:
     url: 'http://www.example.com/earthrise.jpg'
     dest: 'C:\Users\RandomUser\earthrise.jpg'
+
+- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified
+  win_get_url:
+    url: 'http://www.example.com/earthrise.jpg'
+    dest: 'C:\Users\RandomUser\earthrise.jpg'
+    force: no
 '''
diff --git a/windows/win_group.py b/windows/win_group.py
index 2013b52be53..5e8b0adaaf2 100644
--- a/windows/win_group.py
+++ b/windows/win_group.py
@@ -50,7 +50,7 @@ options:
       - absent
     default: present
     aliases: []
-author: Chris Hoffman
+author: "Chris Hoffman (@chrishoffman)"
 '''

 EXAMPLES = '''
diff --git a/windows/win_msi.py b/windows/win_msi.py
index 9eb6f1bafa5..01f09709f57 100644
--- a/windows/win_msi.py
+++ b/windows/win_msi.py
@@ -45,7 +45,7 @@ options:
     description:
       - Path to a file created by installing the MSI to prevent from
         attempting to reinstall the package on every run
-author: Matt Martz
+author: "Matt Martz (@sivel)"
 '''

 EXAMPLES = '''
diff --git a/windows/win_ping.py b/windows/win_ping.py
index de32877d615..ecb5149f8c3 100644
--- a/windows/win_ping.py
+++ b/windows/win_ping.py
@@ -35,7 +35,7 @@ options:
     required: false
     default: 'pong'
     aliases: []
-author: Chris Church
+author: "Chris Church (@cchurch)"
 '''

 EXAMPLES = '''
diff --git a/windows/win_service.py b/windows/win_service.py
index c378be120b1..1f0f6326e65 100644
--- a/windows/win_service.py
+++ b/windows/win_service.py
@@ -55,7 +55,7 @@ options:
       - restarted
     default: null
     aliases: []
-author: Chris Hoffman
+author: "Chris Hoffman (@chrishoffman)"
 '''

 EXAMPLES = '''
diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1
index 4e4c55b2aa3..51c9c827093 100644
--- a/windows/win_stat.ps1
+++ b/windows/win_stat.ps1
@@ -19,6 +19,11 @@

 $params = Parse-Args $args;

+function Date_To_Timestamp($start_date, $end_date)
+{
+    Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds
+}
+
 $path = Get-Attr $params "path" $FALSE;
 If ($path -eq $FALSE)
 {
@@ -36,6 +41,7 @@ If (Test-Path $path)
 {
     Set-Attr $result.stat "exists" $TRUE;
     $info = Get-Item $path;
+    $epoch_date = Get-Date -Date "01/01/1970"
     If ($info.Directory) # Only files have the .Directory attribute.
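Date_To_Timestamp converts .NET DateTimes into Unix-epoch seconds, so win_stat's creationtime/lastaccesstime/lastwritetime facts line up with what the POSIX stat module reports. The same conversion in Python:

    import datetime

    def date_to_timestamp(dt):
        # Seconds elapsed since the Unix epoch, as win_stat.ps1 now reports.
        epoch = datetime.datetime(1970, 1, 1)
        return (dt - epoch).total_seconds()

    print(date_to_timestamp(datetime.datetime(2015, 6, 1, 12, 0, 0)))  # 1433160000.0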
    {
        Set-Attr $result.stat "isdir" $FALSE;
@@ -45,6 +51,12 @@ If (Test-Path $path)
     {
         Set-Attr $result.stat "isdir" $TRUE;
     }
+    Set-Attr $result.stat "extension" $info.Extension;
+    Set-Attr $result.stat "attributes" $info.Attributes.ToString();
+    Set-Attr $result.stat "owner" $info.GetAccessControl().Owner;
+    Set-Attr $result.stat "creationtime" (Date_To_Timestamp $epoch_date $info.CreationTime);
+    Set-Attr $result.stat "lastaccesstime" (Date_To_Timestamp $epoch_date $info.LastAccessTime);
+    Set-Attr $result.stat "lastwritetime" (Date_To_Timestamp $epoch_date $info.LastWriteTime);
 }
 Else
 {
@@ -53,11 +65,9 @@ Else

 If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir)
 {
-    $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
-    $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
-    $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
-    $fp.Dispose();
+    $hash = Get-FileChecksum($path);
     Set-Attr $result.stat "md5" $hash;
+    Set-Attr $result.stat "checksum" $hash;
 }

 Exit-Json $result;
diff --git a/windows/win_stat.py b/windows/win_stat.py
index c98cd55f599..a933384e20b 100644
--- a/windows/win_stat.py
+++ b/windows/win_stat.py
@@ -38,7 +38,7 @@ options:
     required: false
     default: yes
     aliases: []
-author: Chris Church
+author: "Chris Church (@cchurch)"
 '''

 EXAMPLES = '''
diff --git a/windows/win_template.py b/windows/win_template.py
new file mode 100644
index 00000000000..c384ad7775f
--- /dev/null
+++ b/windows/win_template.py
@@ -0,0 +1,49 @@
+# this is a virtual module that is entirely implemented server side
+
+DOCUMENTATION = '''
+---
+module: win_template
+version_added: "1.9.2"
+short_description: Templates a file out to a remote server.
+description:
+     - Templates are processed by the Jinja2 templating language
+       (U(http://jinja.pocoo.org/docs/)) - documentation on the template
+       formatting can be found in the Template Designer Documentation
+       (U(http://jinja.pocoo.org/docs/templates/)).
+     - "Six additional variables can be used in templates: C(ansible_managed)
+       (configurable via the C(defaults) section of C(ansible.cfg)) contains a string
+       which can be used to describe the template name, host, modification time of the
+       template file and the owner uid, C(template_host) contains the node name of
+       the template's machine, C(template_uid) the owner, C(template_path) the
+       absolute path of the template, C(template_fullpath) is the absolute path of the
+       template, and C(template_run_date) is the date that the template was rendered. Note that including
+       a string that uses a date in the template will result in the template being marked 'changed'
+       each time."
+options:
+  src:
+    description:
+      - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
+    required: true
+  dest:
+    description:
+      - Location to render the template to on the remote machine.
+    required: true
+notes:
+  - "templates are loaded with C(trim_blocks=True)."
+  - By default, windows line endings are not created in the generated file.
+  - "In order to ensure windows line endings are in the generated file, add the following header
+    as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line
+    of the template ends with \r\n"
+  - Beware fetching files from windows machines when creating templates
+    because certain tools, such as Powershell ISE, and regedit's export facility
+    add a Byte Order Mark as the first character of the file, which can cause tracebacks.
+  - Use "od -cx" to examine your templates for Byte Order Marks.
+author: "Jon Hawkesworth (@jhawkesworth)"
+'''
+
+EXAMPLES = '''
+# Playbook Example (win_template can only be run inside a playbook)
+- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf
+'''
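The #jinja2: newline_sequence:'\r\n' header mentioned in the notes corresponds to Jinja2's newline_sequence environment option, which controls the newline emitted in rendered output. A local illustration with plain Jinja2 (assumes jinja2 is installed):

    from jinja2 import Environment

    # Jinja2 normalises template newlines to newline_sequence in the output.
    env = Environment(newline_sequence='\r\n', trim_blocks=True)
    print(repr(env.from_string('line1\nline2\n').render()))
    # -> 'line1\r\nline2' (the final newline is dropped unless
    #    keep_trailing_newline is enabled, available since Jinja2 2.7)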
diff --git a/windows/win_user.ps1 b/windows/win_user.ps1
index ae4847a8528..b7be7e4eea3 100644
--- a/windows/win_user.ps1
+++ b/windows/win_user.ps1
@@ -146,6 +146,7 @@ If ($state -eq 'present') {
         If ($password -ne $null) {
             $user_obj.SetPassword($password)
         }
+        $user_obj.SetInfo()
         $result.changed = $true
     }
     ElseIf (($password -ne $null) -and ($update_password -eq 'always')) {
diff --git a/windows/win_user.py b/windows/win_user.py
index 82bcf0897ec..376ff487fb3 100644
--- a/windows/win_user.py
+++ b/windows/win_user.py
@@ -127,7 +127,9 @@ options:
       - query
     default: present
     aliases: []
-author: Paul Durivage / Chris Church
+author:
+    - "Paul Durivage (@angstwad)"
+    - "Chris Church (@cchurch)"
 '''

 EXAMPLES = '''