From ab5098fd7ef6959fd7f03fee585645cd9698b3cc Mon Sep 17 00:00:00 2001
From: Jake Kreider
Date: Fri, 1 Nov 2013 09:23:01 -0500
Subject: [PATCH 1/3] Added metadata support to s3 module

---
 library/cloud/s3 | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/library/cloud/s3 b/library/cloud/s3
index 36ddd6ef800..3ce6e1b0b38 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -83,6 +83,11 @@ options:
     required: false
     default: null
     aliases: [ 'ec2_access_key', 'access_key' ]
+  metadata:
+    description:
+      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+    required: false
+    default: null
 requirements: [ "boto" ]
 author: Lester Wade, Ralph Tice
 '''
@@ -97,7 +102,9 @@ EXAMPLES = '''
 # GET/download and do not overwrite local file (trust remote)
 - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
 # PUT/upload and overwrite remote file (trust local)
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
+# PUT/upload with metadata
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
 # PUT/upload and do not overwrite remote file (trust local)
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
 # Download an object as a string to use else where in your playbook
@@ -201,10 +208,14 @@ def path_check(path):
     else:
         return False
 
-def upload_s3file(module, s3, bucket, obj, src, expiry):
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
     try:
         bucket = s3.lookup(bucket)
-        key = bucket.new_key(obj)
+        key = bucket.new_key(obj)
+        if metadata:
+            for meta_key in metadata.keys():
+                key.set_metadata(meta_key, metadata[meta_key])
+
         key.set_contents_from_filename(src)
         url = key.generate_url(expiry)
         module.exit_json(msg="PUT operation complete", url=url, changed=True)
@@ -261,6 +272,7 @@ def main():
             aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False),
             aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False),
             overwrite = dict(aliases=['force'], default=True, type='bool'),
+            metadata = dict(type='dict'),
         ),
     )
 
@@ -275,6 +287,7 @@
     aws_secret_key = module.params.get('aws_secret_key')
     aws_access_key = module.params.get('aws_access_key')
     overwrite = module.params.get('overwrite')
+    metadata = module.params.get('metadata')
 
     if module.params.get('object'):
         obj = os.path.expanduser(module.params['object'])
@@ -381,24 +394,24 @@
             if md5_local == md5_remote:
                 sum_matches = True
                 if overwrite is True:
-                    upload_s3file(module, s3, bucket, obj, src, expiry)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
                 else:
                     get_download_url(module, s3, bucket, obj, expiry, changed=False)
             else:
                 sum_matches = False
                 if overwrite is True:
-                    upload_s3file(module, s3, bucket, obj, src, expiry)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
                 else:
                     module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
 
         # If neither exist (based on bucket existence), we can create both.
         if bucketrtn is False and pathrtn is True:
             create_bucket(module, s3, bucket)
-            upload_s3file(module, s3, bucket, obj, src, expiry)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
 
         # If bucket exists but key doesn't, just upload.
         if bucketrtn is True and pathrtn is True and keyrtn is False:
-            upload_s3file(module, s3, bucket, obj, src, expiry)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
 
     # Support for deleting an object if we have both params.
     if mode == 'delete':

From 58886533b047d50f8a3a4eefcb081a91dd9d3e8d Mon Sep 17 00:00:00 2001
From: Jake Kreider
Date: Sun, 2 Mar 2014 20:45:53 -0600
Subject: [PATCH 2/3] Updated S3 metadata examples

---
 library/cloud/s3 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/cloud/s3 b/library/cloud/s3
index 3ce6e1b0b38..97a0d489813 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -105,6 +105,8 @@ EXAMPLES = '''
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
 # PUT/upload with metadata
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
+# PUT/upload with multiple metadata
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
 # PUT/upload and do not overwrite remote file (trust local)
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
 # Download an object as a string to use else where in your playbook

From b6b9e1c6f4c03e60cdd77df505665cbaa87ba44e Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 11 Mar 2014 13:48:39 -0500
Subject: [PATCH 3/3] Adding version_added field to metadata field in s3 module

---
 library/cloud/s3 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/library/cloud/s3 b/library/cloud/s3
index 4c4503a9db6..4fc470678b2 100644
--- a/library/cloud/s3
+++ b/library/cloud/s3
@@ -88,6 +88,8 @@ options:
       - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
     required: false
     default: null
+    version_added: "1.6"
+
 requirements: [ "boto" ]
 author: Lester Wade, Ralph Tice
 '''
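Illustrative note (not part of the patch series): the hunks above thread the new metadata dict through to upload_s3file(), which sets each metadata key on the boto Key object before the file is uploaded. Below is a minimal standalone sketch of that same sequence against the boto 2 API the module uses, assuming AWS credentials are available in the environment; the bucket, key, and file path are made-up placeholders.

# Sketch only; mirrors the loop the first patch adds to upload_s3file().
import boto

def put_with_metadata(bucket_name, key_name, src_path, metadata, expiry=600):
    conn = boto.connect_s3()               # picks up AWS credentials from the environment
    bucket = conn.lookup(bucket_name)      # returns None if the bucket does not exist
    key = bucket.new_key(key_name)
    if metadata:
        # Same loop the patch adds: apply each metadata entry before the upload
        for meta_key in metadata.keys():
            key.set_metadata(meta_key, metadata[meta_key])
    key.set_contents_from_filename(src_path)   # PUT the local file
    return key.generate_url(expiry)            # time-limited URL, as the module returns

# Roughly what the documented multi-metadata example does:
#   s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
#       metadata='Content-Encoding=gzip,Cache-Control=no-cache'
url = put_with_metadata('mybucket', '/my/desired/key.txt', '/usr/local/myfile.txt',
                        {'Content-Encoding': 'gzip', 'Cache-Control': 'no-cache'})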