From 0f458210bcc274041859bf6f26af06f4dc17000a Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Tue, 13 Aug 2013 09:30:56 -0400 Subject: [PATCH 1/4] Rebase attempt No idea if I'm rebasing properly or not. This is my first attempt. --- library/cloud/cloudformation | 37 ++++++++++---- library/cloud/ec2 | 85 ++++++++++++++++++++----------- library/cloud/ec2_ami | 99 ++++++++++++++++++++++-------------- library/cloud/ec2_elb | 92 ++++++++++++++++----------------- library/cloud/ec2_vol | 80 +++++++++++++++++++++-------- library/cloud/rds | 77 +++++++++++++--------------- library/cloud/route53 | 43 +++++++++------- library/cloud/s3 | 40 +++++++++------ 8 files changed, 335 insertions(+), 218 deletions(-) diff --git a/library/cloud/cloudformation b/library/cloud/cloudformation index 985f1dc6e56..e827c34f9ac 100644 --- a/library/cloud/cloudformation +++ b/library/cloud/cloudformation @@ -43,10 +43,10 @@ options: aliases: [] region: description: - - The AWS region the stack will be launched in + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true default: null - aliases: [] + aliases: ['aws_region', 'ec2_region'] state: description: - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. 
@@ -81,10 +81,24 @@ tasks: ClusterSize: 3 ''' -import boto.cloudformation.connection import json import time +try: + import boto.cloudformation.connection +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + class Region: def __init__(self, region): '''connects boto to the region specified in the cloudformation template''' @@ -146,11 +160,7 @@ def main(): argument_spec=dict( stack_name=dict(required=True), template_parameters=dict(required=False), - region=dict(required=True, - choices=['ap-northeast-1', 'ap-southeast-1', - 'ap-southeast-2', 'eu-west-1', - 'sa-east-1', 'us-east-1', 'us-west-1', - 'us-west-2']), + region=dict(aliases=['aws_region', 'ec2_region'], required=True, choices=AWS_REGIONS), state=dict(default='present', choices=['present', 'absent']), template=dict(default=None, required=True), disable_rollback=dict(default=False) @@ -159,16 +169,25 @@ def main(): state = module.params['state'] stack_name = module.params['stack_name'] - region = Region(module.params['region']) + r = module.params['region'] template_body = open(module.params['template'], 'r').read() disable_rollback = module.params['disable_rollback'] template_parameters = module.params['template_parameters'] + if not r: + if 'AWS_REGION' in os.environ: + r = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + r = os.environ['EC2_REGION'] + + + # convert the template parameters ansible passes into a tuple for boto template_parameters_tup = [(k, v) for k, v in template_parameters.items()] stack_outputs = {} try: + region = Region(r) cfn = boto.cloudformation.connection.CloudFormationConnection( region=region) except boto.exception.NoAuthHandlerFound, e: diff --git a/library/cloud/ec2 b/library/cloud/ec2 index 45b2e3d6483..9a338acaa42 100644 --- a/library/cloud/ec2 +++ b/library/cloud/ec2 @@ 
-50,17 +50,17 @@ options: region: version_added: "1.2" description: - - the EC2 region to use + - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false default: null - aliases: [] + aliases: [ 'aws_region', 'ec2_region' ] zone: version_added: "1.2" description: - - availability zone in which to launch the instance + - AWS availability zone in which to launch the instance required: false default: null - aliases: [] + aliases: [ 'aws_zone', 'ec2_zone' ] instance_type: description: - instance type to use for the instance @@ -99,22 +99,22 @@ options: aliases: [] ec2_url: description: - - url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints) + - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used required: false default: null aliases: [] - ec2_secret_key: + aws_secret_key: description: - - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used. + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null - aliases: [ EC2_SECRET_KEY ] - ec2_access_key: + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: description: - - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used. + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null - aliases: [ EC2_ACCESS_KEY ] + aliases: [ 'ec2_access_key', 'access_key' ] count: description: - number of instances to launch @@ -190,6 +190,9 @@ author: Seth Vidal, Tim Gerla, Lester Wade ''' EXAMPLES = ''' +# Note: None of these examples set aws_access_key, aws_secret_key, or region. 
+# It is assumed that their matching environment variables are set. + # Basic provisioning example - local_action: module: ec2 @@ -282,6 +285,15 @@ local_action: import sys import time +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + try: import boto.ec2 from boto.exception import EC2ResponseError @@ -517,8 +529,8 @@ def main(): id = dict(), group = dict(type='list'), group_id = dict(), - region = dict(choices=['eu-west-1', 'sa-east-1', 'us-east-1', 'ap-northeast-1', 'us-west-2', 'us-west-1', 'ap-southeast-1', 'ap-southeast-2']), - zone = dict(), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), image = dict(), kernel = dict(), @@ -527,9 +539,9 @@ def main(): ramdisk = dict(), wait = dict(choices=BOOLEANS, default=False), wait_timeout = dict(default=300), - ec2_url = dict(aliases=['EC2_URL']), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY'], no_log=True), - ec2_access_key = dict(aliases=['EC2_ACCESS_KEY']), + ec2_url = dict(), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key']), placement_group = dict(), user_data = dict(), instance_tags = dict(), @@ -542,34 +554,47 @@ def main(): ) ec2_url = module.params.get('ec2_url') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') region = module.params.get('region') - # allow eucarc environment variables to be used if ansible vars aren't set + # allow eucarc environment variables to be used if ansible vars aren't set if not ec2_url and 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: 
- ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] + + if not region: + if 'AWS_REGION' in os.environ: + region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] # If we have a region specified, connect to its endpoint. if region: try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) + ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) - # Otherwise, no region so we fallback to the old connection method - else: + # If we specified an ec2_url then try connecting to it + elif ec2_url: try: - if ec2_url: # if we have an URL set, connect to the specified endpoint - ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key) - else: # otherwise it's Amazon. 
- ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key) + ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) + else: + module.fail_json(msg="Either region or ec2_url must be specified") if module.params.get('state') == 'absent': instance_ids = module.params.get('instance_ids') diff --git a/library/cloud/ec2_ami b/library/cloud/ec2_ami index 265884c1e99..6aa380b6643 100644 --- a/library/cloud/ec2_ami +++ b/library/cloud/ec2_ami @@ -23,22 +23,22 @@ description: options: ec2_url: description: - - url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints) + - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used required: false default: null aliases: [] - ec2_secret_key: + aws_secret_key: description: - - ec2 secret key + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null - aliases: [] - ec2_access_key: + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: description: - - ec2 access key + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null - aliases: [] + aliases: ['ec2_access_key', 'access_key' ] instance_id: description: - instance id of the image to create @@ -71,10 +71,10 @@ options: aliases: [] region: description: - - the EC2 region to use + - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false default: null - aliases: [] + aliases: [ 'aws_region', 'ec2_region' ] description: description: - An optional human-readable string describing the contents and purpose of the AMI. 
@@ -113,8 +113,8 @@ EXAMPLES = ''' # Basic AMI Creation - local_action: module: ec2_ami - ec2_access_key: xxxxxxxxxxxxxxxxxxxxxxx - ec2_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx wait: yes name: newtest @@ -123,8 +123,9 @@ EXAMPLES = ''' # Basic AMI Creation, without waiting - local_action: module: ec2_ami - ec2_access_key: xxxxxxxxxxxxxxxxxxxxxxx - ec2_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx instance_id: i-xxxxxx wait: no name: newtest @@ -133,8 +134,9 @@ EXAMPLES = ''' # Deregister/Delete AMI - local_action: module: ec2_ami - ec2_access_key: xxxxxxxxxxxxxxxxxxxxxxx - ec2_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx image_id: ${instance.image_id} delete_snapshot: True state: absent @@ -142,8 +144,9 @@ EXAMPLES = ''' # Deregister AMI - local_action: module: ec2_ami - ec2_access_key: xxxxxxxxxxxxxxxxxxxxxxx - ec2_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx image_id: ${instance.image_id} delete_snapshot: False state: absent @@ -152,6 +155,15 @@ EXAMPLES = ''' import sys import time +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + try: import boto.ec2 except ImportError: @@ -235,9 +247,9 @@ def deregister_image(module, ec2): def main(): module = AnsibleModule( argument_spec = dict( - ec2_url = dict(aliases=['EC2_URL']), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY'], no_log=True), - ec2_access_key = 
dict(aliases=['EC2_ACCESS_KEY']), + ec2_url = dict(), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key']), instance_id = dict(), image_id = dict(), delete_snapshot = dict(), @@ -247,38 +259,51 @@ def main(): description = dict(default=""), no_reboot = dict(default=True, type="bool"), state = dict(default='present'), - region = dict() + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS) ) ) ec2_url = module.params.get('ec2_url') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') region = module.params.get('region') # allow eucarc environment variables to be used if ansible vars aren't set if not ec2_url and 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] + + if not region: + if 'AWS_REGION' in os.environ: + region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] # If we have a region specified, connect to its endpoint. 
if region: try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(" %s %s %s " % (region, ec2_access_key, ec2_secret_key))) - # Otherwise, no region so we fallback to the old connection method - else: - try: - if ec2_url: # if we have an URL set, connect to the specified endpoint - ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key) - else: # otherwise it's Amazon. - ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key) + ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) + # If we specified an ec2_url then try connecting to it + elif ec2_url: + try: + ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + else: + module.fail_json(msg="Either region or ec2_url must be specified") if module.params.get('state') == 'absent': if not module.params.get('image_id'): diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index 0c130981f97..a8131c2f48b 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -25,7 +25,7 @@ description: if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. version_added: "1.2" -requirements: [ "boto", "urllib2" ] +requirements: [ "boto" ] author: John Jarvis options: state: @@ -43,21 +43,23 @@ options: - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. required: false default: None - ec2_secret_key: + aws_secret_key: description: - - AWS Secret API key + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. 
+ required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: None - ec2_access_key: + aliases: ['ec2_access_key', 'access_key' ] + region: description: - - AWS Access API key - required: false - default: None - ec2_region: - description: - - AWS region of your load balancer. If not set then the region in which - this module is running will be used. + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false + aliases: ['aws_region', 'ec2_region'] """ @@ -103,22 +105,16 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -try: - import urllib2 -except ImportError: - print "failed=True msg='urllib2 required for this module'" - sys.exit(1) - class ElbManager: """Handles EC2 instance ELB registration and de-registration""" def __init__(self, module, instance_id=None, ec2_elbs=None, - ec2_access_key=None, ec2_secret_key=None, ec2_region=None): - self.ec2_access_key = ec2_access_key - self.ec2_secret_key = ec2_secret_key + aws_access_key=None, aws_secret_key=None, region=None): - self.aws_access_key = aws_access_key + self.aws_access_key = aws_access_key + self.aws_secret_key = aws_secret_key self.module = module self.instance_id = instance_id - self.ec2_region = ec2_region + self.region = region self.lbs = self._get_instance_lbs(ec2_elbs) # if there are no ELBs to operate on @@ -174,9 +170,9 @@ class ElbManager: are attached to self.instance_id""" try: - endpoint="elasticloadbalancing.%s.amazonaws.com" % self.ec2_region - connect_region = RegionInfo(name=self.ec2_region, endpoint=endpoint) - elb = boto.ec2.elb.ELBConnection(self.ec2_access_key, self.ec2_secret_key, region=connect_region) + endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region + connect_region = RegionInfo(name=self.region, endpoint=endpoint) + elb = 
boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region) except boto.exception.NoAuthHandlerFound, e: self.module.fail_json(msg=str(e)) @@ -201,42 +197,44 @@ def main(): 'choices': ['present', 'absent']}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False}, - ec2_secret_key={'default': None, 'aliases': ['EC2_SECRET_KEY']}, - ec2_access_key={'default': None, 'aliases': ['EC2_ACCESS_KEY']}, - ec2_region={'default': None, 'required': False, 'choices':AWS_REGIONS} + aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True}, + aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']}, + region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS} ) ) - ec2_secret_key = module.params['ec2_secret_key'] - ec2_access_key = module.params['ec2_access_key'] + aws_secret_key = module.params['aws_secret_key'] + aws_access_key = module.params['aws_access_key'] ec2_elbs = module.params['ec2_elbs'] - ec2_region = module.params['ec2_region'] + region = module.params['region'] if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: module.fail_json(msg="ELBs are required for registration") - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_region and 'EC2_REGION' in os.environ: - ec2_region = os.environ['EC2_REGION'] + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] - if not ec2_region: - 
response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone') - az = response.read() - for r in AWS_REGIONS: - if az.startswith(r): - ec2_region = r - break + if not region: + if 'AWS_REGION' in os.environ: + region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] - if not ec2_region: - module.fail_json(msg = str("ec2_region not specified and unable to determine region from AWS.")) + if not region: + module.fail_json(msg = str("Either region or EC2_REGION environment variable must be set.")) instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, ec2_access_key, - ec2_secret_key, ec2_region=ec2_region) + elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key, + aws_secret_key, region=region) for elb in [ ec2_elbs ]: if not elb_man.exists(elb): diff --git a/library/cloud/ec2_vol b/library/cloud/ec2_vol index 10f30ecfe01..784f07edd39 100644 --- a/library/cloud/ec2_vol +++ b/library/cloud/ec2_vol @@ -22,6 +22,24 @@ description: - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. version_added: "1.1" options: + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key' ] + ec2_url: + description: + - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. 
If not set then the value of the EC2_URL environment variable, if any, is used + required: false + default: null + aliases: [] instance: description: - instance ID if you wish to attach the volume. @@ -49,16 +67,16 @@ options: aliases: [] region: description: - - region in which to create the volume + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false default: null - aliases: [] + aliases: ['aws_region', 'ec2_region'] zone: description: - zone in which to create the volume, if unset uses the zone the instance is in (if set) required: false default: null - aliases: [] + aliases: ['aws_zone', 'ec2_zone'] requirements: [ "boto" ] author: Lester Wade ''' @@ -109,6 +127,15 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) +AWS_REGIONS = ['ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'eu-west-1', + 'sa-east-1', + 'us-east-1', + 'us-west-1', + 'us-west-2'] + def main(): module = AnsibleModule( argument_spec = dict( @@ -116,11 +143,11 @@ def main(): volume_size = dict(required=True), iops = dict(), device_name = dict(), - region = dict(), - zone = dict(), - ec2_url = dict(aliases=['EC2_URL']), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY']), - ec2_access_key = dict(aliases=['EC2_ACCESS_KEY']), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + ec2_url = dict(), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key']), ) ) @@ -131,32 +158,45 @@ def main(): region = module.params.get('region') zone = module.params.get('zone') ec2_url = module.params.get('ec2_url') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = 
module.params.get('aws_access_key') # allow eucarc environment variables to be used if ansible vars aren't set if not ec2_url and 'EC2_URL' in os.environ: ec2_url = os.environ['EC2_URL'] - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] + + if not region: + if 'AWS_REGION' in os.environ: + region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] # If we have a region specified, connect to its endpoint. if region: try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) + ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) # Otherwise, no region so we fallback to the old connection method - else: + elif ec2_url: try: - if ec2_url: # if we have an URL set, connect to the specified endpoint - ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key) - else: # otherwise it's Amazon. - ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key) + ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) + else: + module.fail_json(msg="Either region or ec2_url must be specified") # Here we need to get the zone info for the instance. 
This covers situation where # instance is specified but zone isn't. diff --git a/library/cloud/rds b/library/cloud/rds index cd336c4dccb..99b0fb5446e 100644 --- a/library/cloud/rds +++ b/library/cloud/rds @@ -73,12 +73,12 @@ options: required: false default: null aliases: [] - ec2_region: + region: description: - - the EC2 region to use. If not specified then the EC2_REGION environment variable is used. If neither exist then the AWS is queried for the region that the host invoking the module is located in. + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true default: null - aliases: [] + aliases: [ 'aws_region', 'ec2_region' ] db_name: description: - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. @@ -165,7 +165,7 @@ options: - availability zone in which to launch the instance. Used only when command=create or command=replicate. required: false default: null - aliases: [] + aliases: ['aws_zone', 'ec2_zone'] subnet: description: - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. @@ -178,18 +178,18 @@ options: required: false default: null aliases: [] - ec2_secret_key: + aws_secret_key: description: - - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used. + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null - aliases: [] - ec2_access_key: + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: description: - - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used. + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. 
required: false default: null - aliases: [] + aliases: [ 'ec2_access_key', 'access_key' ] wait: description: - When command=create, replicate, or modify then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated. @@ -263,12 +263,6 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -try: - import urllib2 -except ImportError: - print "failed=True msg='urllib2 required for this module'" - sys.exit(1) - def main(): module = AnsibleModule( argument_spec = dict( @@ -293,11 +287,11 @@ def main(): maint_window = dict(required=False), backup_window = dict(required=False), backup_retention = dict(required=False), - ec2_region = dict(aliases=['EC2_REGION'], choices=AWS_REGIONS, required=False), - zone = dict(required=False), + region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS, required=False), + zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), subnet = dict(required=False), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY'], no_log=True, required=False), - ec2_access_key = dict(aliases=['EC2_ACCESS_KEY'], required=False), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False), wait = dict(type='bool', default=False), wait_timeout = dict(default=300), snapshot = dict(required=False), @@ -327,39 +321,40 @@ def main(): subnet = module.params.get('subnet') backup_window = module.params.get('backup_window') backup_retention = module.params.get('module_retention') - ec2_region = module.params.get('ec2_region') + region = module.params.get('region') zone = module.params.get('zone') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') wait = module.params.get('wait') 
wait_timeout = int(module.params.get('wait_timeout')) snapshot = module.params.get('snapshot') apply_immediately = module.params.get('apply_immediately') # allow environment variables to be used if ansible vars aren't set - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] - if not ec2_region and 'EC2_REGION' in os.environ: - ec2_region = os.environ['EC2_REGION'] + if not region: + if 'AWS_REGION' in os.environ: + region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] - # If region isn't set either as a param or environ variable then - # look up the current region via the AWS URL - if not ec2_region: - response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/placement/availability-zone') - az = response.read() - for r in AWS_REGIONS: - if az.startswith(r): - ec2_region = r - break + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_region: - module.fail_json(msg = str("ec2_region not specified and unable to determine region from AWS.")) + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] + + if not region: + module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) # connect to the rds endpoint try: - conn = boto.rds.connect_to_region(ec2_region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) + conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) except boto.exception.BotoServerError, e: module.fail_json(msg = 
e.error_message) diff --git a/library/cloud/route53 b/library/cloud/route53 index d209ab815b3..efe10b7f077 100644 --- a/library/cloud/route53 +++ b/library/cloud/route53 @@ -60,18 +60,18 @@ options: required: false default: null aliases: [] - ec2_secret_key: + aws_secret_key: description: - - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used. + - AWS secret key. required: false default: null - aliases: [] - ec2_access_key: + aliases: ['ec2_secret_key', 'secret_key'] + aws_access_key: description: - - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used. + - AWS access key. required: false default: null - aliases: [] + aliases: ['ec2_access_key', 'access_key'] requirements: [ "boto" ] author: Bruce Pennypacker ''' @@ -98,9 +98,9 @@ EXAMPLES = ''' - route53: > command=delete zone=foo.com - record={{ r.set.record }} - type={{ r.set.type }} - value={{ r.set.value }} + record={{ rec.set.record }} + type={{ rec.set.type }} + value={{ rec.set.value }} # Add an AAAA record. 
Note that because there are colons in the value # that the entire parameter list must be quoted: @@ -132,8 +132,8 @@ def main(): ttl = dict(required=False, default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), value = dict(required=False), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY'], no_log=True, required=False), - ec2_access_key = dict(aliases=['EC2_ACCESS_KEY'], required=False) + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False) ) ) @@ -143,8 +143,8 @@ def main(): record_in = module.params.get('record') type_in = module.params.get('type') value_in = module.params.get('value') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') value_list = () @@ -165,14 +165,21 @@ def main(): module.fail_json(msg = "parameter 'value' required for create/delete") # allow environment variables to be used if ansible vars aren't set - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] # connect to the route53 endpoint try: - conn = boto.route53.connection.Route53Connection(ec2_access_key, ec2_secret_key) + conn = boto.route53.connection.Route53Connection(aws_access_key, 
aws_secret_key) except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) diff --git a/library/cloud/s3 b/library/cloud/s3 index faddbec327c..062d6e06e48 100644 --- a/library/cloud/s3 +++ b/library/cloud/s3 @@ -72,18 +72,18 @@ options: - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. default: null aliases: [ S3_URL ] - ec2_access_key: + aws_secret_key: description: - - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used. + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null - aliases: [ EC2_ACCESS_KEY ] - ec2_secret_key: + aliases: ['ec2_secret_key', 'secret_key'] + aws_access_key: description: - - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used. + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null - aliases: [ EC2_SECRET_KEY ] + aliases: [ 'ec2_access_key', 'access_key' ] requirements: [ "boto" ] author: Lester Wade, Ralph Tice ''' @@ -252,8 +252,8 @@ def main(): mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), expiry = dict(default=600, aliases=['expiration']), s3_url = dict(aliases=['S3_URL']), - ec2_secret_key = dict(aliases=['EC2_SECRET_KEY']), - ec2_access_key = dict(aliases=['EC2_ACCESS_KEY']), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False), overwrite = dict(default=False, type='bool'), ), ) @@ -265,8 +265,8 @@ def main(): mode = module.params.get('mode') expiry = int(module.params['expiry']) s3_url = module.params.get('s3_url') - ec2_secret_key = module.params.get('ec2_secret_key') - ec2_access_key = module.params.get('ec2_access_key') + aws_secret_key = 
module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') overwrite = module.params.get('overwrite') if module.params.get('object'): @@ -275,21 +275,29 @@ def main(): # allow eucarc environment variables to be used if ansible vars aren't set if not s3_url and 'S3_URL' in os.environ: s3_url = os.environ['S3_URL'] - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] + + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method if 'S3_URL' in os.environ: try: walrus = urlparse.urlparse(s3_url).hostname - s3 = boto.connect_walrus(walrus, ec2_access_key, ec2_secret_key) + s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) else: try: - s3 = boto.connect_s3(ec2_access_key, ec2_secret_key) + s3 = boto.connect_s3(aws_access_key, aws_secret_key) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) From 2e20387671043eecc920f418be7b7fce7376acc8 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Tue, 20 Aug 2013 15:00:46 -0400 Subject: [PATCH 2/4] yet another rebase attempt --- library/cloud/s3.orig | 473 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 473 insertions(+) create mode 100644 library/cloud/s3.orig diff --git a/library/cloud/s3.orig b/library/cloud/s3.orig new file mode 100644 index 00000000000..57d75d058b0 --- /dev/null +++ 
b/library/cloud/s3.orig @@ -0,0 +1,473 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: s3 +short_description: idempotent S3 module putting a file into S3. +description: + - This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto. +version_added: "1.1" +options: + bucket: + description: + - Bucket name. + required: true + default: null + aliases: [] + object: + description: + - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + required: false + default: null + aliases: [] + version_added: "1.3" + src: + description: + - The source file path when performing a PUT operation. + required: false + default: null + aliases: [] + version_added: "1.3" + dest: + description: + - The destination file path when downloading an object/key with a GET operation. + required: false + default: 600 + aliases: [] + version_added: "1.3" + overwrite: + description: + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. 
+ required: false + default: false + version_added: "1.2" + mode: + description: + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). + required: true + default: null + aliases: [] + expiry: + description: + - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. + required: false + default: null + aliases: [] + s3_url: + description: + - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. + default: null + aliases: [ S3_URL ] +<<<<<<< HEAD +======= + +>>>>>>> yet another rebase attempt + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: ['ec2_secret_key', 'secret_key'] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. 
+ required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] +requirements: [ "boto" ] +author: Lester Wade, Ralph Tice +''' + +EXAMPLES = ''' +# Simple PUT operation +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put +# Simple GET operation +- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get +# GET/download and overwrite local file (trust remote) +- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=true +# PUT/upload and overwrite remote file (trust local) +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put overwrite=true +# Download an object as a string to use else where in your playbook +- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr +# Create an empty bucket +- s3: bucket=mybucket mode=create +# Create a bucket with key as directory +- s3: bucket=mybucket object=/my/directory/path mode=create +# Delete a bucket and all contents +- s3: bucket=mybucket mode=delete +''' + +import sys +import os +import urlparse +import hashlib + +try: + import boto +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +def key_check(module, s3, bucket, obj): + try: + bucket = s3.lookup(bucket) + key_check = bucket.get_key(obj) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + if key_check: + return True + else: + return False + +def keysum(module, s3, bucket, obj): + bucket = s3.lookup(bucket) + key_check = bucket.get_key(obj) + if key_check: + md5_remote = key_check.etag[1:-1] + etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5 + if etag_multipart is True: + module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") + sys.exit(0) + return md5_remote + +def bucket_check(module, s3, bucket): + try: + result = 
s3.lookup(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + if result: + return True + else: + return False + +def create_bucket(module, s3, bucket): + try: + bucket = s3.create_bucket(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + if bucket: + return True + +def delete_bucket(module, s3, bucket): + try: + bucket = s3.lookup(bucket) + bucket_contents = bucket.list() + bucket.delete_keys([key.name for key in bucket_contents]) + bucket.delete() + return True + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def delete_key(module, s3, bucket, obj): + try: + bucket = s3.lookup(bucket) + bucket.delete_key(obj) + module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def create_dirkey(module, s3, bucket, obj): + try: + bucket = s3.lookup(bucket) + key = bucket.new_key(obj) + key.set_contents_from_string('') + module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def upload_file_check(src): + if os.path.exists(src): + file_exists is True + else: + file_exists is False + if os.path.isdir(src): + module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) + sys.exit(0) + return file_exists + +def path_check(path): + if os.path.exists(path): + return True + else: + return False + +def upload_s3file(module, s3, bucket, obj, src, expiry): + try: + bucket = s3.lookup(bucket) + key = bucket.new_key(obj) + key.set_contents_from_filename(src) + url = key.generate_url(expiry) + module.exit_json(msg="PUT operation complete", url=url, changed=True) + sys.exit(0) + except s3.provider.storage_copy_error, e: + module.fail_json(msg= str(e)) + +def download_s3file(module, s3, bucket, obj, dest): + try: + bucket = 
s3.lookup(bucket) + key = bucket.lookup(obj) + key.get_contents_to_filename(dest) + module.exit_json(msg="GET operation complete", changed=True) + sys.exit(0) + except s3.provider.storage_copy_error, e: + module.fail_json(msg= str(e)) + +def download_s3str(module, s3, bucket, obj): + try: + bucket = s3.lookup(bucket) + key = bucket.lookup(obj) + contents = key.get_contents_as_string() + module.exit_json(msg="GET operation complete", contents=contents, changed=True) + sys.exit(0) + except s3.provider.storage_copy_error, e: + module.fail_json(msg= str(e)) + +def get_download_url(module, s3, bucket, obj, expiry): + try: + bucket = s3.lookup(bucket) + key = bucket.lookup(obj) + url = key.generate_url(expiry) + module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=True) + sys.exit(0) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def main(): + module = AnsibleModule( + argument_spec = dict( + bucket = dict(required=True), + object = dict(), + src = dict(), + dest = dict(), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), + expiry = dict(default=600, aliases=['expiration']), + s3_url = dict(aliases=['S3_URL']), + aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), + aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False), + overwrite = dict(default=False, type='bool'), + ), + ) + + bucket = module.params.get('bucket') + obj = module.params.get('object') + src = module.params.get('src') + dest = module.params.get('dest') + mode = module.params.get('mode') + expiry = int(module.params['expiry']) + s3_url = module.params.get('s3_url') + aws_secret_key = module.params.get('aws_secret_key') + aws_access_key = module.params.get('aws_access_key') + overwrite = module.params.get('overwrite') + + if module.params.get('object'): + obj = os.path.expanduser(module.params['object']) + + # allow eucarc environment variables 
to be used if ansible vars aren't set + if not s3_url and 'S3_URL' in os.environ: + s3_url = os.environ['S3_URL'] + + if not aws_secret_key: + if 'AWS_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + aws_secret_key = os.environ['EC2_SECRET_KEY'] + + if not aws_access_key: + if 'AWS_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + aws_access_key = os.environ['EC2_ACCESS_KEY'] + + # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method + if 'S3_URL' in os.environ: + try: + walrus = urlparse.urlparse(s3_url).hostname + s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + else: + try: + s3 = boto.connect_s3(aws_access_key, aws_secret_key) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + + # If our mode is a GET operation (download), go through the procedure as appropriate ... + if mode == 'get': + + # First, we check to see if the bucket exists, we get "bucket" returned. + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is False: + module.fail_json(msg="Target bucket cannot be found", failed=True) + sys.exit(0) + + # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. + keyrtn = key_check(module, s3, bucket, obj) + if keyrtn is False: + module.fail_json(msg="Target key cannot be found", failed=True) + sys.exit(0) + + # If the destination path doesn't exist, no need to md5um etag check, so just download. + pathrtn = path_check(dest) + if pathrtn is False: + download_s3file(module, s3, bucket, obj, dest) + + # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. 
+ if pathrtn is True: + md5_remote = keysum(module, s3, bucket, obj) + md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() + if md5_local == md5_remote: + sum_matches = True + if overwrite is True: + download_s3file(module, s3, bucket, obj, dest) + else: + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) + else: + sum_matches = False + if overwrite is True: + download_s3file(module, s3, bucket, obj, dest) + else: + module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) + + # If destination file doesn't already exist we can go ahead and download. + if pathrtn is False: + download_s3file(module, s3, bucket, obj, dest) + + # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. + if sum_matches is True and overwrite is False: + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) + + # At this point explicitly define the overwrite condition. + if sum_matches is True and pathrtn is True and overwrite is True: + download_s3file(module, s3, bucket, obj, dest) + + # If sum does not match but the destination exists, we + + # if our mode is a PUT operation (upload), go through the procedure as appropriate ... + if mode == 'put': + + # Use this snippet to debug through conditionals: +# module.exit_json(msg="Bucket return %s"%bucketrtn) +# sys.exit(0) + + # Lets check the src path. + pathrtn = path_check(src) + if pathrtn is False: + module.fail_json(msg="Local object for PUT does not exist", failed=True) + sys.exit(0) + + # Lets check to see if bucket exists to get ground truth. + bucketrtn = bucket_check(module, s3, bucket) + keyrtn = key_check(module, s3, bucket, obj) + + # Lets check key state. Does it exist and if it does, compute the etag md5sum. 
+ if bucketrtn is True and keyrtn is True: + md5_remote = keysum(module, s3, bucket, obj) + md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() + if md5_local == md5_remote: + sum_matches = True + if overwrite is True: + upload_s3file(module, s3, bucket, obj, src, expiry) + else: + module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) + else: + sum_matches = False + if overwrite is True: + upload_s3file(module, s3, bucket, obj, src, expiry) + else: + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) + + # If neither exist (based on bucket existence), we can create both. + if bucketrtn is False and pathrtn is True: + create_bucket(module, s3, bucket) + upload_s3file(module, s3, bucket, obj, src, expiry) + + # If bucket exists but key doesn't, just upload. + if bucketrtn is True and pathrtn is True and keyrtn is False: + upload_s3file(module, s3, bucket, obj, src, expiry) + + # Support for deleting an object if we have both params. + if mode == 'delete': + if bucket: + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is True: + deletertn = delete_bucket(module, s3, bucket) + if deletertn is True: + module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True) + else: + module.fail_json(msg="Bucket does not exist.", failed=True) + else: + module.fail_json(msg="Bucket parameter is required.", failed=True) + + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. + # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. 
+ if mode == 'create': + if bucket and not obj: + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is True: + module.exit_json(msg="Bucket already exists.", changed=False) + else: + created = create_bucket(module, s3, bucket) + if bucket and obj: + bucketrtn = bucket_check(module, s3, bucket) + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucketrtn is True: + keyrtn = key_check(module, s3, bucket, dirobj) + if keyrtn is True: + module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) + else: + create_dirkey(module, s3, bucket, dirobj) + if bucketrtn is False: + created = create_bucket(module, s3, bucket) + create_dirkey(module, s3, bucket, dirobj) + + # Support for grabbing the time-expired URL for an object in S3/Walrus. + if mode == 'geturl': + if bucket and obj: + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is False: + module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) + else: + keyrtn = key_check(module, s3, bucket, obj) + if keyrtn is True: + get_download_url(module, s3, bucket, obj, expiry) + else: + module.fail_json(msg="Key %s does not exist."%obj, failed=True) + else: + module.fail_json(msg="Bucket and Object parameters must be set", failed=True) + sys.exit(0) + + if mode == 'getstr': + if bucket and obj: + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is False: + module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) + else: + keyrtn = key_check(module, s3, bucket, obj) + if keyrtn is True: + download_s3str(module, s3, bucket, obj) + else: + module.fail_json(msg="Key %s does not exist."%obj, failed=True) + + sys.exit(0) + +# this is magic, see lib/ansible/module_common.py +#<> + +main() From 80ddb1aee6604dad681ed7d79c10a56619e5e847 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Fri, 23 Aug 2013 13:56:59 -0400 Subject: [PATCH 3/4] Added wait parameter --- library/cloud/ec2_elb | 34 +-- library/cloud/s3.orig | 473 
------------------------------------------ 2 files changed, 22 insertions(+), 485 deletions(-) delete mode 100644 library/cloud/s3.orig diff --git a/library/cloud/ec2_elb b/library/cloud/ec2_elb index a8131c2f48b..7588cd234a9 100644 --- a/library/cloud/ec2_elb +++ b/library/cloud/ec2_elb @@ -60,6 +60,12 @@ options: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: false aliases: ['aws_region', 'ec2_region'] + wait: + description: + - Wait for instance registration or deregistration to complete successfully before returning. + required: false + default: yes + choices: [ "yes", "no" ] """ @@ -124,21 +130,23 @@ class ElbManager: else: self.changed = False - def deregister(self): + def deregister(self, wait): """De-register the instance from all ELBs and wait for the ELB to report it out-of-service""" for lb in self.lbs: lb.deregister_instances([self.instance_id]) - self._await_elb_instance_state(lb, 'OutOfService') + if wait: + self._await_elb_instance_state(lb, 'OutOfService') - def register(self): + def register(self, wait): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: lb.register_instances([self.instance_id]) - self._await_elb_instance_state(lb, 'InService') + if wait: + self._await_elb_instance_state(lb, 'InService') def exists(self, lbtest): """ Verify that the named ELB actually exists """ @@ -196,10 +204,11 @@ def main(): state={'required': True, 'choices': ['present', 'absent']}, instance_id={'required': True}, - ec2_elbs={'default': None, 'required': False}, + ec2_elbs={'default': None, 'required': False, 'type':'list'}, aws_secret_key={'default': None, 'aliases': ['ec2_secret_key', 'secret_key'], 'no_log': True}, aws_access_key={'default': None, 'aliases': ['ec2_access_key', 'access_key']}, - region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS} + 
region={'default': None, 'required': False, 'aliases':['aws_region', 'ec2_region'], 'choices':AWS_REGIONS}, + wait={'required': False, 'type': 'bool', 'default': True} ) ) @@ -207,6 +216,7 @@ aws_access_key = module.params['aws_access_key'] ec2_elbs = module.params['ec2_elbs'] region = module.params['region'] + wait = module.params['wait'] if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: module.fail_json(msg="ELBs are required for registration") @@ -230,21 +240,21 @@ region = os.environ['EC2_REGION'] if not region: - module.fail_json(msg = str("Either region or EC2_REGION environment variable must be set.")) + module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set.")) instance_id = module.params['instance_id'] elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key, aws_secret_key, region=region) - for elb in [ ec2_elbs ]: + for elb in ec2_elbs: if not elb_man.exists(elb): - str="ELB %s does not exist" % elb - module.fail_json(msg=str) + msg="ELB %s does not exist" % elb + module.fail_json(msg=msg) if module.params['state'] == 'present': - elb_man.register(wait) + elb_man.register(wait) elif module.params['state'] == 'absent': - elb_man.deregister() + elb_man.deregister(wait) ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) diff --git a/library/cloud/s3.orig b/library/cloud/s3.orig deleted file mode 100644 index 57d75d058b0..00000000000 --- a/library/cloud/s3.orig +++ /dev/null @@ -1,473 +0,0 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version.
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: s3 -short_description: idempotent S3 module putting a file into S3. -description: - - This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto. -version_added: "1.1" -options: - bucket: - description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. - required: false - default: null - aliases: [] - version_added: "1.3" - src: - description: - - The source file path when performing a PUT operation. - required: false - default: null - aliases: [] - version_added: "1.3" - dest: - description: - - The destination file path when downloading an object/key with a GET operation. - required: false - default: 600 - aliases: [] - version_added: "1.3" - overwrite: - description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. - required: false - default: false - version_added: "1.2" - mode: - description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] - expiry: - description: - - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. 
- required: false - default: null - aliases: [] - s3_url: - description: - - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. - default: null - aliases: [ S3_URL ] -<<<<<<< HEAD -======= - ->>>>>>> yet another rebase attempt - aws_secret_key: - description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. - required: false - default: null - aliases: ['ec2_secret_key', 'secret_key'] - aws_access_key: - description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. - required: false - default: null - aliases: [ 'ec2_access_key', 'access_key' ] -requirements: [ "boto" ] -author: Lester Wade, Ralph Tice -''' - -EXAMPLES = ''' -# Simple PUT operation -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put -# Simple GET operation -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=true -# PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put overwrite=true -# Download an object as a string to use else where in your playbook -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr -# Create an empty bucket -- s3: bucket=mybucket mode=create -# Create a bucket with key as directory -- s3: bucket=mybucket object=/my/directory/path mode=create -# Delete a bucket and all contents -- s3: bucket=mybucket mode=delete -''' - -import sys -import os -import urlparse -import hashlib - -try: - import boto -except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) - -def key_check(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) 
- except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if key_check: - return True - else: - return False - -def keysum(module, s3, bucket, obj): - bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) - if key_check: - md5_remote = key_check.etag[1:-1] - etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5 - if etag_multipart is True: - module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.") - sys.exit(0) - return md5_remote - -def bucket_check(module, s3, bucket): - try: - result = s3.lookup(bucket) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if result: - return True - else: - return False - -def create_bucket(module, s3, bucket): - try: - bucket = s3.create_bucket(bucket) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - if bucket: - return True - -def delete_bucket(module, s3, bucket): - try: - bucket = s3.lookup(bucket) - bucket_contents = bucket.list() - bucket.delete_keys([key.name for key in bucket_contents]) - bucket.delete() - return True - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def delete_key(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - bucket.delete_key(obj) - module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def create_dirkey(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_string('') - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying 
a directory is not a valid source for upload.", failed=True) - sys.exit(0) - return file_exists - -def path_check(path): - if os.path.exists(path): - return True - else: - return False - -def upload_s3file(module, s3, bucket, obj, src, expiry): - try: - bucket = s3.lookup(bucket) - key = bucket.new_key(obj) - key.set_contents_from_filename(src) - url = key.generate_url(expiry) - module.exit_json(msg="PUT operation complete", url=url, changed=True) - sys.exit(0) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3file(module, s3, bucket, obj, dest): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - sys.exit(0) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def download_s3str(module, s3, bucket, obj): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - contents = key.get_contents_as_string() - module.exit_json(msg="GET operation complete", contents=contents, changed=True) - sys.exit(0) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) - -def get_download_url(module, s3, bucket, obj, expiry): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - url = key.generate_url(expiry) - module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=True) - sys.exit(0) - except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) - -def main(): - module = AnsibleModule( - argument_spec = dict( - bucket = dict(required=True), - object = dict(), - src = dict(), - dest = dict(), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), - expiry = dict(default=600, aliases=['expiration']), - s3_url = dict(aliases=['S3_URL']), - aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), - aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], 
required=False), - overwrite = dict(default=False, type='bool'), - ), - ) - - bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') - dest = module.params.get('dest') - mode = module.params.get('mode') - expiry = int(module.params['expiry']) - s3_url = module.params.get('s3_url') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - overwrite = module.params.get('overwrite') - - if module.params.get('object'): - obj = os.path.expanduser(module.params['object']) - - # allow eucarc environment variables to be used if ansible vars aren't set - if not s3_url and 'S3_URL' in os.environ: - s3_url = os.environ['S3_URL'] - - if not aws_secret_key: - if 'AWS_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['AWS_SECRET_KEY'] - elif 'EC2_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['EC2_SECRET_KEY'] - - if not aws_access_key: - if 'AWS_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['AWS_ACCESS_KEY'] - elif 'EC2_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['EC2_ACCESS_KEY'] - - # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method - if 'S3_URL' in os.environ: - try: - walrus = urlparse.urlparse(s3_url).hostname - s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - try: - s3 = boto.connect_s3(aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - - # If our mode is a GET operation (download), go through the procedure as appropriate ... - if mode == 'get': - - # First, we check to see if the bucket exists, we get "bucket" returned. 
- bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Target bucket cannot be found", failed=True) - sys.exit(0) - - # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is False: - module.fail_json(msg="Target key cannot be found", failed=True) - sys.exit(0) - - # If the destination path doesn't exist, no need to md5um etag check, so just download. - pathrtn = path_check(dest) - if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest) - - # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. - if pathrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - else: - sum_matches = False - if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - else: - module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) - - # If destination file doesn't already exist we can go ahead and download. - if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest) - - # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. - if sum_matches is True and overwrite is False: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - - # At this point explicitly define the overwrite condition. 
- if sum_matches is True and pathrtn is True and overwrite is True: - download_s3file(module, s3, bucket, obj, dest) - - # If sum does not match but the destination exists, we - - # if our mode is a PUT operation (upload), go through the procedure as appropriate ... - if mode == 'put': - - # Use this snippet to debug through conditionals: -# module.exit_json(msg="Bucket return %s"%bucketrtn) -# sys.exit(0) - - # Lets check the src path. - pathrtn = path_check(src) - if pathrtn is False: - module.fail_json(msg="Local object for PUT does not exist", failed=True) - sys.exit(0) - - # Lets check to see if bucket exists to get ground truth. - bucketrtn = bucket_check(module, s3, bucket) - keyrtn = key_check(module, s3, bucket, obj) - - # Lets check key state. Does it exist and if it does, compute the etag md5sum. - if bucketrtn is True and keyrtn is True: - md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() - if md5_local == md5_remote: - sum_matches = True - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry) - else: - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) - else: - sum_matches = False - if overwrite is True: - upload_s3file(module, s3, bucket, obj, src, expiry) - else: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) - - # If neither exist (based on bucket existence), we can create both. - if bucketrtn is False and pathrtn is True: - create_bucket(module, s3, bucket) - upload_s3file(module, s3, bucket, obj, src, expiry) - - # If bucket exists but key doesn't, just upload. - if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry) - - # Support for deleting an object if we have both params. 
- if mode == 'delete': - if bucket: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - deletertn = delete_bucket(module, s3, bucket) - if deletertn is True: - module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True) - else: - module.fail_json(msg="Bucket does not exist.", failed=True) - else: - module.fail_json(msg="Bucket parameter is required.", failed=True) - - # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. - # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. - if mode == 'create': - if bucket and not obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is True: - module.exit_json(msg="Bucket already exists.", changed=False) - else: - created = create_bucket(module, s3, bucket) - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucketrtn is True: - keyrtn = key_check(module, s3, bucket, dirobj) - if keyrtn is True: - module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) - else: - create_dirkey(module, s3, bucket, dirobj) - if bucketrtn is False: - created = create_bucket(module, s3, bucket) - create_dirkey(module, s3, bucket, dirobj) - - # Support for grabbing the time-expired URL for an object in S3/Walrus. 
- if mode == 'geturl': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - get_download_url(module, s3, bucket, obj, expiry) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - else: - module.fail_json(msg="Bucket and Object parameters must be set", failed=True) - sys.exit(0) - - if mode == 'getstr': - if bucket and obj: - bucketrtn = bucket_check(module, s3, bucket) - if bucketrtn is False: - module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) - else: - keyrtn = key_check(module, s3, bucket, obj) - if keyrtn is True: - download_s3str(module, s3, bucket, obj) - else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) - - sys.exit(0) - -# this is magic, see lib/ansible/module_common.py -#<> - -main() From 2f6dcfe36f108f86fca4c8d356d289b634c96108 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 23 Aug 2013 13:40:57 -0500 Subject: [PATCH 4/4] Fixed small typo from the merge conflict in the route53 module --- library/cloud/route53 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/cloud/route53 b/library/cloud/route53 index 946e49f1216..38db85deb68 100644 --- a/library/cloud/route53 +++ b/library/cloud/route53 @@ -139,7 +139,7 @@ def main(): type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), value = dict(required=False), aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False), - aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False) + aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False), overwrite = dict(required=False, choices=BOOLEANS, type='bool') ) )