Merge pull request #1599 from jmunhoz/s3-bucket-ceph
Add Ceph RGW S3 compatibility
Commit ac8e3f18a3

1 changed file with 88 additions and 27 deletions
@@ -16,9 +16,9 @@
 DOCUMENTATION = '''
 ---
 module: s3_bucket
-short_description: Manage s3 buckets in AWS
+short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
 description:
-- Manage s3 buckets in AWS
+- Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
 version_added: "2.0"
 author: "Rob White (@wimnat)"
 options:
@@ -40,9 +40,13 @@ options:
 default: null
 s3_url:
 description:
-- S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS
+- S3 URL endpoint for usage with Ceph, Eucalypus, fakes3, etc. Otherwise assumes AWS
 default: null
 aliases: [ S3_URL ]
+ceph:
+description:
+- Enable API compatibility with Ceph. It takes into account the S3 API subset working with Ceph in order to provide the same module behaviour where possible.
+version_added: "2.2"
 requester_pays:
 description:
 - With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket.
@@ -78,6 +82,12 @@ EXAMPLES = '''
 - s3_bucket:
 name: mys3bucket
 
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+name: mys3bucket
+s3_url: http://your-ceph-rados-gateway-server.xxx
+ceph: true
+
 # Remove an s3 bucket and any keys it contains
 - s3_bucket:
 name: mys3bucket
@@ -99,6 +109,9 @@ EXAMPLES = '''
 import xml.etree.ElementTree as ET
+import urlparse
 
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
 
 try:
 import boto.ec2
 from boto.s3.connection import OrdinaryCallingFormat, Location
@@ -130,7 +143,7 @@ def create_tags_container(tags):
 tags_obj.add_tag_set(tag_set)
 return tags_obj
 
-def create_bucket(connection, module, location):
+def _create_bucket(connection, module, location):
 
 policy = module.params.get("policy")
 name = module.params.get("name")
@@ -141,11 +154,11 @@ def create_bucket(connection, module, location):
 
 try:
 bucket = connection.get_bucket(name)
-except S3ResponseError, e:
+except S3ResponseError as e:
 try:
 bucket = connection.create_bucket(name, location=location)
 changed = True
-except S3CreateError, e:
+except S3CreateError as e:
 module.fail_json(msg=e.message)
 
 # Versioning
@@ -155,7 +168,7 @@ def create_bucket(connection, module, location):
 bucket.configure_versioning(versioning)
 changed = True
 versioning_status = bucket.get_versioning_status()
-except S3ResponseError, e:
+except S3ResponseError as e:
 module.fail_json(msg=e.message)
 elif not versioning_status and not versioning:
 # do nothing
@@ -185,7 +198,7 @@ def create_bucket(connection, module, location):
 # Policy
 try:
 current_policy = bucket.get_policy()
-except S3ResponseError, e:
+except S3ResponseError as e:
 if e.error_code == "NoSuchBucketPolicy":
 current_policy = None
 else:
@@ -200,7 +213,7 @@ def create_bucket(connection, module, location):
 bucket.set_policy(policy)
 changed = True
 current_policy = bucket.get_policy()
-except S3ResponseError, e:
+except S3ResponseError as e:
 module.fail_json(msg=e.message)
 
 elif current_policy is None and policy is not None:
@@ -210,7 +223,7 @@ def create_bucket(connection, module, location):
 bucket.set_policy(policy)
 changed = True
 current_policy = bucket.get_policy()
-except S3ResponseError, e:
+except S3ResponseError as e:
 module.fail_json(msg=e.message)
 
 elif current_policy is not None and policy is None:
@@ -218,7 +231,7 @@ def create_bucket(connection, module, location):
 bucket.delete_policy()
 changed = True
 current_policy = bucket.get_policy()
-except S3ResponseError, e:
+except S3ResponseError as e:
 if e.error_code == "NoSuchBucketPolicy":
 current_policy = None
 else:
@@ -232,7 +245,7 @@ def create_bucket(connection, module, location):
 try:
 current_tags = bucket.get_tags()
 tag_set = TagSet()
-except S3ResponseError, e:
+except S3ResponseError as e:
 if e.error_code == "NoSuchTagSet":
 current_tags = None
 else:
@@ -253,12 +266,12 @@ def create_bucket(connection, module, location):
 bucket.delete_tags()
 current_tags_dict = tags
 changed = True
-except S3ResponseError, e:
+except S3ResponseError as e:
 module.fail_json(msg=e.message)
 
 module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status, requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
 
-def destroy_bucket(connection, module):
+def _destroy_bucket(connection, module):
 
 force = module.params.get("force")
 name = module.params.get("name")
@@ -266,7 +279,7 @@ def destroy_bucket(connection, module):
 
 try:
 bucket = connection.get_bucket(name)
-except S3ResponseError, e:
+except S3ResponseError as e:
 if e.error_code != "NoSuchBucket":
 module.fail_json(msg=e.message)
 else:
@@ -279,17 +292,50 @@ def destroy_bucket(connection, module):
 for key in bucket.list():
 key.delete()
 
-except BotoServerError, e:
+except BotoServerError as e:
 module.fail_json(msg=e.message)
 
 try:
 bucket = connection.delete_bucket(name)
 changed = True
-except S3ResponseError, e:
+except S3ResponseError as e:
 module.fail_json(msg=e.message)
 
 module.exit_json(changed=changed)
 
+def _create_bucket_ceph(connection, module, location):
+
+name = module.params.get("name")
+
+changed = False
+
+try:
+bucket = connection.get_bucket(name)
+except S3ResponseError as e:
+try:
+bucket = connection.create_bucket(name, location=location)
+changed = True
+except S3CreateError as e:
+module.fail_json(msg=e.message)
+
+module.exit_json(changed=changed)
+
+def _destroy_bucket_ceph(connection, module):
+
+_destroy_bucket(connection, module)
+
+def create_bucket(connection, module, location, flavour='aws'):
+if flavour == 'ceph':
+_create_bucket_ceph(connection, module, location)
+else:
+_create_bucket(connection, module, location)
+
+def destroy_bucket(connection, module, flavour='aws'):
+if flavour == 'ceph':
+_destroy_bucket_ceph(connection, module)
+else:
+_destroy_bucket(connection, module)
+
 def is_fakes3(s3_url):
 """ Return True if s3_url has scheme fakes3:// """
 if s3_url is not None:
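Note: the new _create_bucket_ceph path stops once the bucket exists; the versioning, policy, tagging and requester-pays handling stays on the AWS-only _create_bucket path. Below is a minimal sketch of the selection rule that main() applies further down; the helper name is hypothetical and not code from this commit:

    # Hypothetical helper illustrating the flavour rule added in main():
    # 'ceph' is only selected when both the ceph flag and an s3_url endpoint
    # are supplied; ceph without an endpoint is rejected outright.
    def pick_flavour(ceph_enabled, s3_url):
        if ceph_enabled and not s3_url:
            raise ValueError('ceph flavour requires s3_url')  # main() calls fail_json here
        return 'ceph' if ceph_enabled else 'aws'

    assert pick_flavour(True, 'http://your-ceph-rados-gateway-server.xxx') == 'ceph'
    assert pick_flavour(False, None) == 'aws'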
@@ -319,7 +365,8 @@ def main():
 s3_url = dict(aliases=['S3_URL']),
 state = dict(default='present', choices=['present', 'absent']),
 tags = dict(required=None, default={}, type='dict'),
-versioning = dict(default='no', type='bool')
+versioning = dict(default='no', type='bool'),
+ceph = dict(default='no', type='bool')
 )
 )
 
@@ -344,10 +391,27 @@ def main():
 if not s3_url and 'S3_URL' in os.environ:
 s3_url = os.environ['S3_URL']
 
+ceph = module.params.get('ceph')
+
+if ceph and not s3_url:
+module.fail_json(msg='ceph flavour requires s3_url')
+
+flavour = 'aws'
+
 # Look at s3_url and tweak connection settings
 # if connecting to Walrus or fakes3
 try:
-if is_fakes3(s3_url):
+if s3_url and ceph:
+ceph = urlparse.urlparse(s3_url)
+connection = boto.connect_s3(
+host=ceph.hostname,
+port=ceph.port,
+is_secure=ceph.scheme == 'https',
+calling_format=OrdinaryCallingFormat(),
+**aws_connect_params
+)
+flavour = 'ceph'
+elif is_fakes3(s3_url):
 fakes3 = urlparse.urlparse(s3_url)
 connection = S3Connection(
 is_secure=fakes3.scheme == 'fakes3s',
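For reference, a standalone sketch of the same connection setup pointed at a Ceph RGW endpoint, using boto 2 under Python 2 as the module itself does. The endpoint, port and credentials below are placeholders, not values from this commit:

    import urlparse

    import boto
    from boto.s3.connection import OrdinaryCallingFormat

    s3_url = 'http://your-ceph-rados-gateway-server.xxx:7480'  # placeholder RGW endpoint
    parsed = urlparse.urlparse(s3_url)

    connection = boto.connect_s3(
        aws_access_key_id='RGW_ACCESS_KEY',        # placeholder credentials
        aws_secret_access_key='RGW_SECRET_KEY',
        host=parsed.hostname,
        port=parsed.port,
        is_secure=(parsed.scheme == 'https'),
        calling_format=OrdinaryCallingFormat(),    # path-style addressing, as in the diff
    )

    print([b.name for b in connection.get_all_buckets()])

OrdinaryCallingFormat keeps path-style bucket addressing, which RGW deployments without wildcard DNS typically require.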
@@ -365,9 +429,9 @@
 if connection is None:
 connection = boto.connect_s3(**aws_connect_params)
 
-except boto.exception.NoAuthHandlerFound, e:
+except boto.exception.NoAuthHandlerFound as e:
 module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
-except Exception, e:
+except Exception as e:
 module.fail_json(msg='Failed to connect to S3: %s' % str(e))
 
 if connection is None: # this should never happen
@@ -376,12 +440,9 @@
 state = module.params.get("state")
 
 if state == 'present':
-create_bucket(connection, module, location)
+create_bucket(connection, module, location, flavour=flavour)
 elif state == 'absent':
-destroy_bucket(connection, module)
-
-from ansible.module_utils.basic import *
-from ansible.module_utils.ec2 import *
+destroy_bucket(connection, module, flavour=flavour)
 
 if __name__ == '__main__':
 main()