diff --git a/cloud/s3.orig b/cloud/s3.orig
new file mode 100644
index 00000000000..57d75d058b0
--- /dev/null
+++ b/cloud/s3.orig
@@ -0,0 +1,473 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: s3
+short_description: idempotent S3 module putting a file into S3.
+description:
+ - This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto.
+version_added: "1.1"
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ default: null
+ aliases: []
+ object:
+ description:
+ - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+ required: false
+ default: null
+ aliases: []
+ version_added: "1.3"
+ src:
+ description:
+ - The source file path when performing a PUT operation.
+ required: false
+ default: null
+ aliases: []
+ version_added: "1.3"
+ dest:
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ required: false
+ default: null
+ aliases: []
+ version_added: "1.3"
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ required: false
+ default: false
+ version_added: "1.2"
+ mode:
+ description:
+ - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), create (bucket) and delete (bucket).
+ required: true
+ default: null
+ aliases: []
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
+ required: false
+ default: 600
+ aliases: []
+ s3_url:
+ description:
+ - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined.
+ default: null
+ aliases: [ S3_URL ]
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: ['ec2_secret_key', 'secret_key']
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'ec2_access_key', 'access_key' ]
+requirements: [ "boto" ]
+author: Lester Wade, Ralph Tice
+'''
+
+EXAMPLES = '''
+# Simple PUT operation
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
+# Simple GET operation
+- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
+# GET/download and overwrite local file (trust remote)
+- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=true
+# PUT/upload and overwrite remote file (trust local)
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put overwrite=true
+# Download an object as a string to use else where in your playbook
+- s3: bucket=mybucket object=/my/desired/key.txt mode=getstr
+# Create an empty bucket
+- s3: bucket=mybucket mode=create
+# Create a bucket with key as directory
+- s3: bucket=mybucket object=/my/directory/path mode=create
+# Delete a bucket and all contents
+- s3: bucket=mybucket mode=delete
+'''
+
+import sys
+import os
+import urlparse
+import hashlib
+
+try:
+ import boto
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+def key_check(module, s3, bucket, obj):
+    # Return True if key `obj` exists in `bucket`, False otherwise.
+    # Fails the module (fail_json) on an S3 response error from the lookup.
+    try:
+        bucket = s3.lookup(bucket)
+        key_check = bucket.get_key(obj)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+    if key_check:
+        return True
+    else:
+        return False
+
+def keysum(module, s3, bucket, obj):
+    # Return the remote object's MD5 checksum: its S3 etag with the
+    # surrounding double quotes stripped. Multipart uploads are rejected
+    # because their etag is not an MD5 digest.
+    # NOTE(review): if the key does not exist, md5_remote is never bound and
+    # the final return raises UnboundLocalError -- callers in main() run
+    # key_check() first; confirm that invariant holds for new callers.
+    bucket = s3.lookup(bucket)
+    key_check = bucket.get_key(obj)
+    if key_check:
+        md5_remote = key_check.etag[1:-1]
+        etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5
+        if etag_multipart is True:
+            module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
+            sys.exit(0)
+    return md5_remote
+
+def bucket_check(module, s3, bucket):
+    # Return True if the bucket exists (lookup returned a Bucket), else False.
+    # Fails the module on an S3 response error.
+    try:
+        result = s3.lookup(bucket)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+    if result:
+        return True
+    else:
+        return False
+
+def create_bucket(module, s3, bucket):
+    # Create the bucket; return True on success.
+    # NOTE(review): implicitly returns None if create_bucket() returned a
+    # falsy value -- callers only test `is True`, so this is benign today.
+    try:
+        bucket = s3.create_bucket(bucket)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+    if bucket:
+        return True
+
+def delete_bucket(module, s3, bucket):
+    # Delete every key in the bucket, then the bucket itself.
+    # Returns True on success; fails the module on an S3 response error.
+    try:
+        bucket = s3.lookup(bucket)
+        bucket_contents = bucket.list()
+        # S3 requires the bucket to be empty before it can be deleted.
+        bucket.delete_keys([key.name for key in bucket_contents])
+        bucket.delete()
+        return True
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+
+def delete_key(module, s3, bucket, obj):
+    # Delete key `obj` from the bucket and exit the module (changed=True).
+    # NOTE(review): `bucket` is rebound to the boto Bucket object before the
+    # message is formatted, so %s prints the object's repr, not the plain
+    # bucket name -- confirm whether bucket.name was intended.
+    try:
+        bucket = s3.lookup(bucket)
+        bucket.delete_key(obj)
+        module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+
+def create_dirkey(module, s3, bucket, obj):
+    # Create an empty key `obj` (a trailing-slash name) to emulate a
+    # "virtual directory", then exit the module (changed=True).
+    try:
+        bucket = s3.lookup(bucket)
+        key = bucket.new_key(obj)
+        key.set_contents_from_string('')
+        module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+
+def upload_file_check(src):
+ if os.path.exists(src):
+ file_exists is True
+ else:
+ file_exists is False
+ if os.path.isdir(src):
+ module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
+ sys.exit(0)
+ return file_exists
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+def upload_s3file(module, s3, bucket, obj, src, expiry):
+    # Upload local file `src` to key `obj`, return a time-limited download
+    # URL (expiry seconds) and exit the module (changed=True).
+    try:
+        bucket = s3.lookup(bucket)
+        key = bucket.new_key(obj)
+        key.set_contents_from_filename(src)
+        url = key.generate_url(expiry)
+        module.exit_json(msg="PUT operation complete", url=url, changed=True)
+        sys.exit(0)
+    except s3.provider.storage_copy_error, e:
+        module.fail_json(msg= str(e))
+
+def download_s3file(module, s3, bucket, obj, dest):
+    # Download key `obj` to local path `dest` and exit the module
+    # (changed=True). Fails the module on an S3 copy error.
+    try:
+        bucket = s3.lookup(bucket)
+        key = bucket.lookup(obj)
+        key.get_contents_to_filename(dest)
+        module.exit_json(msg="GET operation complete", changed=True)
+        sys.exit(0)
+    except s3.provider.storage_copy_error, e:
+        module.fail_json(msg= str(e))
+
+def download_s3str(module, s3, bucket, obj):
+    # Download key `obj` as a string and exit the module, returning the
+    # object body in the `contents` result field.
+    try:
+        bucket = s3.lookup(bucket)
+        key = bucket.lookup(obj)
+        contents = key.get_contents_as_string()
+        module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+        sys.exit(0)
+    except s3.provider.storage_copy_error, e:
+        module.fail_json(msg= str(e))
+
+def get_download_url(module, s3, bucket, obj, expiry):
+    # Generate a time-limited (expiry seconds) download URL for key `obj`
+    # and exit the module with the url and expiry in the result.
+    try:
+        bucket = s3.lookup(bucket)
+        key = bucket.lookup(obj)
+        url = key.generate_url(expiry)
+        module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=True)
+        sys.exit(0)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+
+def main():
+    # Entry point: parse module arguments, connect to S3 (or Walrus when the
+    # S3_URL environment variable is set) and dispatch on `mode`
+    # (get / put / delete / create / geturl / getstr). Each helper exits the
+    # module itself via exit_json/fail_json.
+    module = AnsibleModule(
+        argument_spec = dict(
+            bucket = dict(required=True),
+            object = dict(),
+            src = dict(),
+            dest = dict(),
+            mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
+            expiry = dict(default=600, aliases=['expiration']),
+            s3_url = dict(aliases=['S3_URL']),
+            aws_secret_key = dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True, required=False),
+            aws_access_key = dict(aliases=['ec2_access_key', 'access_key'], required=False),
+            overwrite = dict(default=False, type='bool'),
+        ),
+    )
+
+    bucket = module.params.get('bucket')
+    obj = module.params.get('object')
+    src = module.params.get('src')
+    dest = module.params.get('dest')
+    mode = module.params.get('mode')
+    expiry = int(module.params['expiry'])
+    s3_url = module.params.get('s3_url')
+    aws_secret_key = module.params.get('aws_secret_key')
+    aws_access_key = module.params.get('aws_access_key')
+    overwrite = module.params.get('overwrite')
+
+    # Expand a leading ~ in the object name (replaces the raw value above).
+    if module.params.get('object'):
+        obj = os.path.expanduser(module.params['object'])
+
+    # allow eucarc environment variables to be used if ansible vars aren't set
+    if not s3_url and 'S3_URL' in os.environ:
+        s3_url = os.environ['S3_URL']
+
+    if not aws_secret_key:
+        if 'AWS_SECRET_KEY' in os.environ:
+            aws_secret_key = os.environ['AWS_SECRET_KEY']
+        elif 'EC2_SECRET_KEY' in os.environ:
+            aws_secret_key = os.environ['EC2_SECRET_KEY']
+
+    if not aws_access_key:
+        if 'AWS_ACCESS_KEY' in os.environ:
+            aws_access_key = os.environ['AWS_ACCESS_KEY']
+        elif 'EC2_ACCESS_KEY' in os.environ:
+            aws_access_key = os.environ['EC2_ACCESS_KEY']
+
+    # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method
+    # NOTE(review): this tests the environment variable, not the (possibly
+    # module-supplied) s3_url parameter -- an s3_url passed only as a module
+    # argument never triggers the Walrus path; confirm intent.
+    if 'S3_URL' in os.environ:
+        try:
+            walrus = urlparse.urlparse(s3_url).hostname
+            s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
+        except boto.exception.NoAuthHandlerFound, e:
+            module.fail_json(msg = str(e))
+    else:
+        try:
+            s3 = boto.connect_s3(aws_access_key, aws_secret_key)
+        except boto.exception.NoAuthHandlerFound, e:
+            module.fail_json(msg = str(e))
+
+    # If our mode is a GET operation (download), go through the procedure as appropriate ...
+    if mode == 'get':
+
+        # First, we check to see if the bucket exists, we get "bucket" returned.
+        bucketrtn = bucket_check(module, s3, bucket)
+        if bucketrtn is False:
+            module.fail_json(msg="Target bucket cannot be found", failed=True)
+            sys.exit(0)
+
+        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
+        keyrtn = key_check(module, s3, bucket, obj)
+        if keyrtn is False:
+            module.fail_json(msg="Target key cannot be found", failed=True)
+            sys.exit(0)
+
+        # If the destination path doesn't exist, no need to md5um etag check, so just download.
+        pathrtn = path_check(dest)
+        if pathrtn is False:
+            download_s3file(module, s3, bucket, obj, dest)
+
+        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
+        if pathrtn is True:
+            md5_remote = keysum(module, s3, bucket, obj)
+            md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
+            if md5_local == md5_remote:
+                sum_matches = True
+                if overwrite is True:
+                    download_s3file(module, s3, bucket, obj, dest)
+                else:
+                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
+            else:
+                sum_matches = False
+                if overwrite is True:
+                    download_s3file(module, s3, bucket, obj, dest)
+                else:
+                    module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+
+        # If destination file doesn't already exist we can go ahead and download.
+        # NOTE(review): duplicate of the pathrtn check above; download_s3file
+        # already exited the module in that case, so this block is dead code.
+        if pathrtn is False:
+            download_s3file(module, s3, bucket, obj, dest)
+
+        # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
+        # NOTE(review): every branch above ends in exit_json/fail_json or a
+        # download that exits, so the remaining checks in this mode appear
+        # unreachable; sum_matches is also only bound inside the pathrtn-True
+        # branch -- confirm before relying on them.
+        if sum_matches is True and overwrite is False:
+            module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
+
+        # At this point explicitly define the overwrite condition.
+        if sum_matches is True and pathrtn is True and overwrite is True:
+            download_s3file(module, s3, bucket, obj, dest)
+
+        # If sum does not match but the destination exists, we
+
+    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
+    if mode == 'put':
+
+        # Use this snippet to debug through conditionals:
+#        module.exit_json(msg="Bucket return %s"%bucketrtn)
+#        sys.exit(0)
+
+        # Lets check the src path.
+        pathrtn = path_check(src)
+        if pathrtn is False:
+            module.fail_json(msg="Local object for PUT does not exist", failed=True)
+            sys.exit(0)
+
+        # Lets check to see if bucket exists to get ground truth.
+        bucketrtn = bucket_check(module, s3, bucket)
+        keyrtn = key_check(module, s3, bucket, obj)
+
+        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
+        if bucketrtn is True and keyrtn is True:
+            md5_remote = keysum(module, s3, bucket, obj)
+            md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
+            if md5_local == md5_remote:
+                sum_matches = True
+                if overwrite is True:
+                    upload_s3file(module, s3, bucket, obj, src, expiry)
+                else:
+                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
+            else:
+                sum_matches = False
+                if overwrite is True:
+                    upload_s3file(module, s3, bucket, obj, src, expiry)
+                else:
+                    # NOTE(review): the GET path uses fail_json for the same
+                    # situation; exit_json with failed=True is inconsistent --
+                    # confirm which is intended.
+                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+
+        # If neither exist (based on bucket existence), we can create both.
+        if bucketrtn is False and pathrtn is True:
+            create_bucket(module, s3, bucket)
+            upload_s3file(module, s3, bucket, obj, src, expiry)
+
+        # If bucket exists but key doesn't, just upload.
+        if bucketrtn is True and pathrtn is True and keyrtn is False:
+            upload_s3file(module, s3, bucket, obj, src, expiry)
+
+    # Support for deleting an object if we have both params.
+    # NOTE(review): despite the comment, this deletes the whole BUCKET and all
+    # of its keys (delete_bucket), not a single object -- confirm intent.
+    if mode == 'delete':
+        if bucket:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is True:
+                deletertn = delete_bucket(module, s3, bucket)
+                if deletertn is True:
+                    module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
+            else:
+                module.fail_json(msg="Bucket does not exist.", failed=True)
+        else:
+            module.fail_json(msg="Bucket parameter is required.", failed=True)
+
+    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
+    if mode == 'create':
+        if bucket and not obj:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is True:
+                module.exit_json(msg="Bucket already exists.", changed=False)
+            else:
+                created = create_bucket(module, s3, bucket)
+        if bucket and obj:
+            bucketrtn = bucket_check(module, s3, bucket)
+            # Normalise the object name to a trailing-slash "directory" key.
+            if obj.endswith('/'):
+                dirobj = obj
+            else:
+                dirobj = obj + "/"
+            if bucketrtn is True:
+                keyrtn = key_check(module, s3, bucket, dirobj)
+                if keyrtn is True:
+                    module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
+                else:
+                    create_dirkey(module, s3, bucket, dirobj)
+            if bucketrtn is False:
+                created = create_bucket(module, s3, bucket)
+                create_dirkey(module, s3, bucket, dirobj)
+
+    # Support for grabbing the time-expired URL for an object in S3/Walrus.
+    if mode == 'geturl':
+        if bucket and obj:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is False:
+                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
+            else:
+                keyrtn = key_check(module, s3, bucket, obj)
+                if keyrtn is True:
+                    get_download_url(module, s3, bucket, obj, expiry)
+                else:
+                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
+        else:
+            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+            sys.exit(0)
+
+    # Return the object's contents directly in the module result.
+    if mode == 'getstr':
+        if bucket and obj:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is False:
+                module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
+            else:
+                keyrtn = key_check(module, s3, bucket, obj)
+                if keyrtn is True:
+                    download_s3str(module, s3, bucket, obj)
+                else:
+                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
+
+    sys.exit(0)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+
+main()