merging with ansible devel

commit b47bbfad1b
Author: Scot Spinner
Date:   2015-07-24 14:10:20 -07:00
183 changed files with 8128 additions and 1379 deletions


@@ -10,7 +10,8 @@ addons:
   - python2.4
   - python2.6
 script:
-  - python2.4 -m compileall -fq -x 'cloud/|/accelerate.py' .
+  - python2.4 -m compileall -fq -x 'cloud/' .
   - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
   - python2.6 -m compileall -fq .
   - python2.7 -m compileall -fq .
+#- ./test-docs.sh core
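
The compileall runs above gate Python 2.4 compatibility; this change drops accelerate.py from the 2.4 exclusion list, so that file is now syntax-checked too. For reference, roughly the same check can be driven from Python itself; a sketch, not part of the commit:

import compileall
import re

# Rough equivalent of: python2.4 -m compileall -fq -x 'cloud/' .
# force=True mirrors -f, quiet=True mirrors -q, rx mirrors -x
compileall.compile_dir('.', force=True, quiet=True, rx=re.compile('cloud/'))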


@@ -22,6 +22,10 @@ I'd also read the community page above, but in particular, make sure you copy [t
 Also please make sure you are testing on the latest released version of Ansible or the development branch.
 
+If you'd like to contribute code to an existing module
+======================================================
+Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team.
+
 Thanks!


@@ -102,11 +102,12 @@ AWS_REGIONS = ['ap-northeast-1',
 def get_url(module, url):
     """ Get url and return response """
-    try:
-        r = urllib2.urlopen(url)
-    except (urllib2.HTTPError, urllib2.URLError), e:
-        code = getattr(e, 'code', -1)
-        module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
+
+    r, info = fetch_url(module, url)
+    if info['status'] != 200:
+        # Backwards compat
+        info['status_code'] = info['status']
+        module.fail_json(**info)
     return r

@@ -182,7 +183,7 @@ def main():
             choices=['i386', 'amd64']),
         region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
         virt=dict(required=False, default='paravirtual',
-                  choices=['paravirtual', 'hvm'])
+                  choices=['paravirtual', 'hvm']),
     )
     module = AnsibleModule(argument_spec=arg_spec)
     distro = module.params['distro']

@@ -195,7 +196,8 @@ def main():
 # this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
 
 if __name__ == '__main__':
     main()
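
The change above swaps raw urllib2 calls for the fetch_url helper from ansible.module_utils.urls, which returns a (response, info) pair instead of raising. A minimal sketch of that calling convention (get_body is an illustrative name; fetch_url and fail_json are the real helpers the diff uses):

def get_body(module, url):
    # fetch_url returns (response, info); info always carries a 'status' key,
    # so error handling is a status check rather than a try/except
    response, info = fetch_url(module, url)
    if info['status'] != 200:
        # fail_json accepts arbitrary keyword args, so the info dict can be
        # passed through wholesale, as the module above does
        module.fail_json(**info)
    return response.read()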


@@ -51,16 +51,23 @@ options:
   template:
     description:
       - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present"
+        Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json"
     required: false
     default: null
     aliases: []
+  notification_arns:
+    description:
+      - The Simple Notification Service (SNS) topic ARNs to publish stack related events.
+    required: false
+    default: null
+    version_added: "2.0"
   stack_policy:
     description:
       - the path of the cloudformation stack policy
     required: false
     default: null
     aliases: []
-    version_added: "x.x"
+    version_added: "1.9"
   tags:
     description:
       - Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later.

@@ -81,8 +88,14 @@ options:
       - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present"
     required: false
     version_added: "2.0"
+  template_format:
+    description: For local templates, allows specification of json or yaml format
+    default: json
+    choices: [ json, yaml ]
+    required: false
+    version_added: "2.0"
-author: James S. Martin
+author: "James S. Martin (@jsmartin)"
 extends_documentation_fragment: aws
 '''

@@ -103,6 +116,22 @@ EXAMPLES = '''
     tags:
       Stack: "ansible-cloudformation"
 
+# Basic role example
+- name: launch ansible cloudformation example
+  cloudformation:
+    stack_name: "ansible-cloudformation"
+    state: "present"
+    region: "us-east-1"
+    disable_rollback: true
+    template: "roles/cloudformation/files/cloudformation-example.json"
+    template_parameters:
+      KeyName: "jmartin"
+      DiskType: "ephemeral"
+      InstanceType: "m1.small"
+      ClusterSize: 3
+    tags:
+      Stack: "ansible-cloudformation"
+
 # Removal example
 - name: tear down old deployment
   cloudformation:

@@ -127,6 +156,7 @@ EXAMPLES = '''
 import json
 import time
+import yaml
 
 try:
     import boto

@@ -191,6 +221,11 @@ def stack_operation(cfn, stack_name, operation):
                           events = map(str, list(stack.describe_events())),
                           output = 'Stack %s failed' % operation)
             break
+        elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status:
+            result = dict(changed=True, failed=True,
+                          events = map(str, list(stack.describe_events())),
+                          output = 'Stack %s rollback failed' % operation)
+            break
         else:
             time.sleep(5)
     return result

@@ -216,9 +251,11 @@ def main():
             template_parameters=dict(required=False, type='dict', default={}),
             state=dict(default='present', choices=['present', 'absent']),
             template=dict(default=None, required=False),
+            notification_arns=dict(default=None, required=False),
             stack_policy=dict(default=None, required=False),
             disable_rollback=dict(default=False, type='bool'),
             template_url=dict(default=None, required=False),
+            template_format=dict(default='json', choices=['json', 'yaml'], required=False),
             tags=dict(default=None)
         )
     )

@@ -245,6 +282,14 @@ def main():
     else:
         template_body = None
 
+    if module.params['template_format'] == 'yaml':
+        if template_body is None:
+            module.fail_json(msg='yaml format only supported for local templates')
+        else:
+            template_body = json.dumps(yaml.load(template_body), indent=2)
+
+    notification_arns = module.params['notification_arns']
+
     if module.params['stack_policy'] is not None:
         stack_policy_body = open(module.params['stack_policy'], 'r').read()
     else:

@@ -285,6 +330,7 @@ def main():
         try:
             cfn.create_stack(stack_name, parameters=template_parameters_tup,
                              template_body=template_body,
+                             notification_arns=notification_arns,
                              stack_policy_body=stack_policy_body,
                              template_url=template_url,
                              disable_rollback=disable_rollback,

@@ -307,6 +353,7 @@ def main():
         try:
             cfn.update_stack(stack_name, parameters=template_parameters_tup,
                              template_body=template_body,
+                             notification_arns=notification_arns,
                              stack_policy_body=stack_policy_body,
                              disable_rollback=disable_rollback,
                              template_url=template_url,
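
The new template_format option only applies to local templates because the conversion happens client-side: the YAML body is parsed and re-serialized as the JSON that boto's CloudFormation calls expect. A standalone sketch of that step (the file path is hypothetical):

import json
import yaml

# Read a local YAML template (hypothetical path) ...
with open('roles/cloudformation/files/cloudformation-example.yml') as f:
    template_body = f.read()

# ... and convert it the same way the module does: parse the YAML document,
# then re-serialize it as indented JSON for create_stack/update_stack
template_body = json.dumps(yaml.load(template_body), indent=2)
print(template_body)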

cloud/amazon/ec2.py (mode changed: Executable file → Normal file)

@@ -44,7 +44,7 @@ options:
   region:
     version_added: "1.2"
     description:
-      - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+      - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
     required: false
     default: null
     aliases: [ 'aws_region', 'ec2_region' ]

@@ -57,16 +57,17 @@ options:
     aliases: [ 'aws_zone', 'ec2_zone' ]
   instance_type:
     description:
-      - instance type to use for the instance
+      - instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
     required: true
     default: null
     aliases: []
   tenancy:
     version_added: "1.9"
     description:
-      - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
+      - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
     required: false
     default: default
+    choices: [ "default", "dedicated" ]
     aliases: []
   spot_price:
     version_added: "1.5"

@@ -75,6 +76,14 @@ options:
     required: false
     default: null
     aliases: []
+  spot_type:
+    version_added: "2.0"
+    description:
+      - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
+    required: false
+    default: "one-time"
+    choices: [ "one-time", "persistent" ]
+    aliases: []
   image:
     description:
       - I(ami) ID to use for the instance

@@ -123,6 +132,7 @@ options:
       - enable detailed monitoring (CloudWatch) for instance
     required: false
     default: null
+    choices: [ "yes", "no" ]
     aliases: []
   user_data:
     version_added: "0.9"

@@ -186,7 +196,15 @@ options:
     description:
       - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
     required: false
-    default: true
+    default: yes
+    choices: [ "yes", "no" ]
+  termination_protection:
+    version_added: "2.0"
+    description:
+      - Enable or Disable the Termination Protection
+    required: false
+    default: no
+    choices: [ "yes", "no" ]
   state:
     version_added: "1.3"
     description:

@@ -198,7 +216,7 @@ options:
   volumes:
     version_added: "1.5"
     description:
-      - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+      - "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict."
     required: false
     default: null
     aliases: []

@@ -223,7 +241,10 @@ options:
     default: null
     aliases: []
 
-author: Seth Vidal, Tim Gerla, Lester Wade
+author:
+    - "Tim Gerla (@tgerla)"
+    - "Lester Wade (@lwade)"
+    - "Seth Vidal"
 extends_documentation_fragment: aws
 '''
@@ -606,6 +627,19 @@ def get_instance_info(inst):
     except AttributeError:
         instance_info['ebs_optimized'] = False
 
+    try:
+        bdm_dict = {}
+        bdm = getattr(inst, 'block_device_mapping')
+        for device_name in bdm.keys():
+            bdm_dict[device_name] = {
+                'status': bdm[device_name].status,
+                'volume_id': bdm[device_name].volume_id,
+                'delete_on_termination': bdm[device_name].delete_on_termination
+            }
+        instance_info['block_device_mapping'] = bdm_dict
+    except AttributeError:
+        instance_info['block_device_mapping'] = False
+
     try:
         instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
     except AttributeError:

@@ -658,6 +692,8 @@ def create_block_device(module, ec2, volume):
             size = volume.get('volume_size', snapshot.volume_size)
         if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
             module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+        if 'encrypted' in volume:
+            module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
     if 'ephemeral' in volume:
         if 'snapshot' in volume:
             module.fail_json(msg = 'Cannot set both ephemeral and snapshot')

@@ -666,8 +702,8 @@ def create_block_device(module, ec2, volume):
                            size=volume.get('volume_size'),
                            volume_type=volume.get('device_type'),
                            delete_on_termination=volume.get('delete_on_termination', False),
-                           iops=volume.get('iops'))
+                           iops=volume.get('iops'),
+                           encrypted=volume.get('encrypted', None))
 
 def boto_supports_param_in_spot_request(ec2, param):
     """
     Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.

@@ -756,6 +792,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     instance_type = module.params.get('instance_type')
     tenancy = module.params.get('tenancy')
     spot_price = module.params.get('spot_price')
+    spot_type = module.params.get('spot_type')
     image = module.params.get('image')
     if override_count:
         count = override_count

@@ -779,6 +816,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     exact_count = module.params.get('exact_count')
     count_tag = module.params.get('count_tag')
     source_dest_check = module.boolean(module.params.get('source_dest_check'))
+    termination_protection = module.boolean(module.params.get('termination_protection'))
 
     # group_id and group_name are exclusive of each other
     if group_id and group_name:

@@ -948,6 +986,7 @@ def create_instances(module, ec2, vpc, override_count=None):
             params.update(dict(
                 count = count_remaining,
+                type = spot_type,
             ))
             res = ec2.request_spot_instances(spot_price, **params)

@@ -1007,11 +1046,16 @@ def create_instances(module, ec2, vpc, override_count=None):
         for res in res_list:
             running_instances.extend(res.instances)
 
-        # Enabled by default by Amazon
-        if not source_dest_check:
+        # Enabled by default by AWS
+        if source_dest_check is False:
             for inst in res.instances:
                 inst.modify_attribute('sourceDestCheck', False)
 
+        # Disabled by default by AWS
+        if termination_protection is True:
+            for inst in res.instances:
+                inst.modify_attribute('disableApiTermination', True)
+
     # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
     if instance_tags:
         try:

@@ -1022,6 +1066,7 @@ def create_instances(module, ec2, vpc, override_count=None):
     instance_dict_array = []
     created_instance_ids = []
     for inst in running_instances:
+        inst.update()
         d = get_instance_info(inst)
         created_instance_ids.append(inst.id)
         instance_dict_array.append(d)
@@ -1127,21 +1172,32 @@ def startstop_instances(module, ec2, instance_ids, state):
     if not isinstance(instance_ids, list) or len(instance_ids) < 1:
         module.fail_json(msg='instance_ids should be a list of instances, aborting')
 
-    # Check that our instances are not in the state we want to take them to
-    # and change them to our desired state
+    # Check (and eventually change) instances attributes and instances state
     running_instances_array = []
     for res in ec2.get_all_instances(instance_ids):
         for inst in res.instances:
-            if inst.state != state:
-                instance_dict_array.append(get_instance_info(inst))
-                try:
-                    if state == 'running':
-                        inst.start()
-                    else:
-                        inst.stop()
-                except EC2ResponseError, e:
-                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
-                changed = True
+
+            # Check "source_dest_check" attribute
+            if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+                inst.modify_attribute('sourceDestCheck', source_dest_check)
+                changed = True
+
+            # Check "termination_protection" attribute
+            if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection:
+                inst.modify_attribute('disableApiTermination', termination_protection)
+                changed = True
+
+            # Check instance state
+            if inst.state != state:
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    if state == 'running':
+                        inst.start()
+                    else:
+                        inst.stop()
+                except EC2ResponseError, e:
+                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
 
     ## Wait for all the instances to finish starting or stopping
     wait_timeout = time.time() + wait_timeout

@@ -1175,6 +1231,7 @@ def main():
             zone = dict(aliases=['aws_zone', 'ec2_zone']),
             instance_type = dict(aliases=['type']),
             spot_price = dict(),
+            spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
             image = dict(),
             kernel = dict(),
             count = dict(type='int', default='1'),

@@ -1192,7 +1249,8 @@ def main():
             instance_profile_name = dict(),
             instance_ids = dict(type='list', aliases=['instance_id']),
             source_dest_check = dict(type='bool', default=True),
-            state = dict(default='present'),
+            termination_protection = dict(type='bool', default=False),
+            state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']),
             exact_count = dict(type='int', default=None),
             count_tag = dict(),
             volumes = dict(type='list'),
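
The encrypted-volume support above ends up as an extra keyword on boto's BlockDeviceType. A sketch of how a volume dict like the one documented above maps onto boto 2's block device classes — essentially the mapping create_block_device() performs, with iops/snapshot handling elided; the values are illustrative, and the encrypted parameter only exists in newer boto 2.x releases, which is why the module defaults it to None:

from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping

volume = {'device_name': '/dev/sdb', 'volume_size': 10,
          'device_type': 'gp2', 'encrypted': True}

# Same shape as the create_block_device() call in the diff above
bd = BlockDeviceType(size=volume.get('volume_size'),
                     volume_type=volume.get('device_type'),
                     delete_on_termination=volume.get('delete_on_termination', False),
                     encrypted=volume.get('encrypted', None))

bdm = BlockDeviceMapping()
bdm[volume['device_name']] = bd
# bdm would then be passed to run_instances(..., block_device_map=bdm)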


@@ -69,6 +69,12 @@ options:
       - Image ID to be deregistered.
     required: false
     default: null
+  device_mapping:
+    version_added: "2.0"
+    description:
+      - An optional list of devices with custom configurations (same block-device-mapping parameters)
+    required: false
+    default: null
   delete_snapshot:
     description:
       - Whether or not to delete an AMI while deregistering it.

@@ -81,7 +87,7 @@ options:
     default: null
     version_added: "2.0"
 
-author: Evan Duffield <eduffield@iacquire.com>
+author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
 extends_documentation_fragment: aws
 '''

@@ -110,6 +116,23 @@ EXAMPLES = '''
     name: newtest
   register: instance
 
+# AMI Creation, with a custom root-device size and another EBS attached
+- ec2_ami:
+    aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
+    aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+    instance_id: i-xxxxxx
+    name: newtest
+    device_mapping:
+      - device_name: /dev/sda1
+        size: XXX
+        delete_on_termination: true
+        volume_type: gp2
+      - device_name: /dev/sdb
+        size: YYY
+        delete_on_termination: false
+        volume_type: gp2
+  register: instance
+
 # Deregister/Delete AMI
 - ec2_ami:
     aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
@@ -136,6 +159,7 @@ import time
 try:
     import boto
     import boto.ec2
+    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False

@@ -155,6 +179,7 @@ def create_image(module, ec2):
     wait_timeout = int(module.params.get('wait_timeout'))
     description = module.params.get('description')
     no_reboot = module.params.get('no_reboot')
+    device_mapping = module.params.get('device_mapping')
     tags = module.params.get('tags')
 
     try:

@@ -163,9 +188,29 @@ def create_image(module, ec2):
                   'description': description,
                   'no_reboot': no_reboot}
 
+        if device_mapping:
+            bdm = BlockDeviceMapping()
+            for device in device_mapping:
+                if 'device_name' not in device:
+                    module.fail_json(msg = 'Device name must be set for volume')
+                device_name = device['device_name']
+                del device['device_name']
+                bd = BlockDeviceType(**device)
+                bdm[device_name] = bd
+            params['block_device_mapping'] = bdm
+
         image_id = ec2.create_image(**params)
     except boto.exception.BotoServerError, e:
-        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+        if e.error_code == 'InvalidAMIName.Duplicate':
+            images = ec2.get_all_images()
+            for img in images:
+                if img.name == name:
+                    module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False)
+                    sys.exit(0)
+            else:
+                module.fail_json(msg="Error in retrieving duplicate AMI details")
+        else:
+            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
 
     # Wait until the image is recognized. EC2 API has eventual consistency,
     # such that a successful CreateImage API call doesn't guarantee the success

@@ -248,8 +293,8 @@ def main():
             description = dict(default=""),
             no_reboot = dict(default=False, type="bool"),
             state = dict(default='present'),
-            tags = dict(type='dict'),
+            device_mapping = dict(type='list'),
+            tags = dict(type='dict')
         )
     )
     module = AnsibleModule(argument_spec=argument_spec)

@@ -282,4 +327,3 @@ from ansible.module_utils.basic import *
 from ansible.module_utils.ec2 import *
-
 main()
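
The InvalidAMIName.Duplicate branch above leans on Python's for/else: the else clause runs only if the loop completes without hitting the exit_json call (i.e. no image with the requested name was actually found). A minimal standalone sketch of that control flow (the names here are illustrative, not the module's):

class Image(object):
    def __init__(self, name):
        self.name = name

def find_duplicate(images, name):
    for img in images:
        if img.name == name:
            return img  # plays the role of module.exit_json(...)
    else:
        # only reached if the loop finished without returning
        raise LookupError("Error in retrieving duplicate AMI details")

print(find_duplicate([Image('a'), Image('newtest')], 'newtest').name)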


@@ -25,7 +25,7 @@ description:
   - Can search AMIs with different owners
   - Can search by matching tag(s), by AMI name and/or other criteria
   - Results can be sorted and sliced
-author: Tom Bamford
+author: "Tom Bamford (@tombamford)"
 notes:
   - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
   - See the example below for a suggestion of how to search by distro/release.

@@ -141,7 +141,7 @@ EXAMPLES = '''
 # Search for the AMI tagged "project:website"
 - ec2_ami_find:
     owner: self
-    tags:
+    ami_tags:
       project: website
     no_result_action: fail
   register: ami_find


@@ -21,7 +21,7 @@ description:
   - Can create or delete AWS Autoscaling Groups
   - Works with the ec2_lc module to manage Launch Configurations
 version_added: "1.6"
-author: Gareth Rushgrove
+author: "Gareth Rushgrove (@garethr)"
 options:
   state:
     description:

@@ -43,7 +43,7 @@ options:
   launch_config_name:
     description:
       - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
-    required: false
+    required: true
   min_size:
     description:
       - Minimum number of instances in group

@@ -109,6 +109,12 @@ options:
     default: EC2
     version_added: "1.7"
     choices: ['EC2', 'ELB']
+  default_cooldown:
+    description:
+      - The number of seconds after a scaling activity completes before another can begin.
+    required: false
+    default: 300 seconds
+    version_added: "2.0"
   wait_timeout:
     description:
       - how long before wait instances to become viable when replaced. Used in conjunction with instance_ids option.

@@ -190,9 +196,13 @@ to "replace_instances":
 '''
 import time
+import logging as log
 
 from ansible.module_utils.basic import *
 from ansible.module_utils.ec2 import *
 
+log.getLogger('boto').setLevel(log.CRITICAL)
+#log.basicConfig(filename='/tmp/ansible_ec2_asg.log', level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
+
 try:
     import boto.ec2.autoscale
@@ -265,8 +275,71 @@ def get_properties(autoscaling_group):
     if getattr(autoscaling_group, "tags", None):
         properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
 
     return properties
 
+def elb_dreg(asg_connection, module, group_name, instance_id):
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    as_group = asg_connection.get_all_groups(names=[group_name])[0]
+    wait_timeout = module.params.get('wait_timeout')
+    props = get_properties(as_group)
+    count = 1
+    if as_group.load_balancers and as_group.health_check_type == 'ELB':
+        try:
+            elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
+        except boto.exception.NoAuthHandlerFound, e:
+            module.fail_json(msg=str(e))
+    else:
+        return
+
+    exists = True
+    for lb in as_group.load_balancers:
+        elb_connection.deregister_instances(lb, instance_id)
+        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
+
+    wait_timeout = time.time() + wait_timeout
+    while wait_timeout > time.time() and count > 0:
+        count = 0
+        for lb in as_group.load_balancers:
+            lb_instances = elb_connection.describe_instance_health(lb)
+            for i in lb_instances:
+                if i.instance_id == instance_id and i.state == "InService":
+                    count += 1
+                    log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
+        time.sleep(10)
+
+    if wait_timeout <= time.time():
+        # waiting took too long
+        module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
+
+def elb_healthy(asg_connection, elb_connection, module, group_name):
+    healthy_instances = []
+    as_group = asg_connection.get_all_groups(names=[group_name])[0]
+    props = get_properties(as_group)
+    # get healthy, inservice instances from ASG
+    instances = []
+    for instance, settings in props['instance_facts'].items():
+        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
+            instances.append(instance)
+    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ELB instance status:")
+    for lb in as_group.load_balancers:
+        # we catch a race condition that sometimes happens if the instance exists in the ASG
+        # but has not yet show up in the ELB
+        try:
+            lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
+        except boto.exception.InvalidInstance, e:
+            pass
+        for i in lb_instances:
+            if i.state == "InService":
+                healthy_instances.append(i.instance_id)
+            log.debug("{0}: {1}".format(i.instance_id, i.state))
+    return len(healthy_instances)
+
 def wait_for_elb(asg_connection, module, group_name):
     region, ec2_url, aws_connect_params = get_aws_connection_info(module)
@@ -277,36 +350,23 @@ def wait_for_elb(asg_connection, module, group_name):
     as_group = asg_connection.get_all_groups(names=[group_name])[0]
 
     if as_group.load_balancers and as_group.health_check_type == 'ELB':
+        log.debug("Waiting for ELB to consider instances healthy.")
         try:
             elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
         except boto.exception.NoAuthHandlerFound, e:
             module.fail_json(msg=str(e))
 
         wait_timeout = time.time() + wait_timeout
-        healthy_instances = {}
+        healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
 
-        while len(healthy_instances.keys()) < as_group.min_size and wait_timeout > time.time():
-            as_group = asg_connection.get_all_groups(names=[group_name])[0]
-            props = get_properties(as_group)
-            # get healthy, inservice instances from ASG
-            instances = []
-            for instance, settings in props['instance_facts'].items():
-                if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
-                    instances.append(instance)
-            for lb in as_group.load_balancers:
-                # we catch a race condition that sometimes happens if the instance exists in the ASG
-                # but has not yet show up in the ELB
-                try:
-                    lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
-                except boto.exception.InvalidInstance, e:
-                    pass
-                for i in lb_instances:
-                    if i.state == "InService":
-                        healthy_instances[i.instance_id] = i.state
+        while healthy_instances < as_group.min_size and wait_timeout > time.time():
+            healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
+            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
             time.sleep(10)
         if wait_timeout <= time.time():
             # waiting took too long
             module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
+        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
 
 def create_autoscaling_group(connection, module):
     group_name = module.params.get('name')
@@ -320,6 +380,7 @@ def create_autoscaling_group(connection, module):
     set_tags = module.params.get('tags')
     health_check_period = module.params.get('health_check_period')
     health_check_type = module.params.get('health_check_type')
+    default_cooldown = module.params.get('default_cooldown')
     wait_for_instances = module.params.get('wait_for_instances')
     as_groups = connection.get_all_groups(names=[group_name])
     wait_timeout = module.params.get('wait_timeout')

@@ -359,12 +420,13 @@ def create_autoscaling_group(connection, module):
                  connection=connection,
                  tags=asg_tags,
                  health_check_period=health_check_period,
-                 health_check_type=health_check_type)
+                 health_check_type=health_check_type,
+                 default_cooldown=default_cooldown)
 
         try:
             connection.create_auto_scaling_group(ag)
             if wait_for_instances == True:
-                wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+                wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                 wait_for_elb(connection, module, group_name)
             as_group = connection.get_all_groups(names=[group_name])[0]
             asg_properties = get_properties(as_group)

@@ -430,7 +492,7 @@ def create_autoscaling_group(connection, module):
             module.fail_json(msg=str(e))
 
         if wait_for_instances == True:
-            wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
+            wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
             wait_for_elb(connection, module, group_name)
         try:
             as_group = connection.get_all_groups(names=[group_name])[0]
@@ -471,6 +533,15 @@ def get_chunks(l, n):
     for i in xrange(0, len(l), n):
         yield l[i:i+n]
 
+def update_size(group, max_size, min_size, dc):
+    log.debug("setting ASG sizes")
+    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
+    group.max_size = max_size
+    group.min_size = min_size
+    group.desired_capacity = dc
+    group.update()
+
 def replace(connection, module):
     batch_size = module.params.get('replace_batch_size')
     wait_timeout = module.params.get('wait_timeout')
@@ -478,91 +549,191 @@ def replace(connection, module):
     max_size = module.params.get('max_size')
     min_size = module.params.get('min_size')
     desired_capacity = module.params.get('desired_capacity')
+    lc_check = module.params.get('lc_check')
     # FIXME: we need some more docs about this feature
     replace_instances = module.params.get('replace_instances')
 
     as_group = connection.get_all_groups(names=[group_name])[0]
-    wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
+    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
     props = get_properties(as_group)
     instances = props['instances']
-    replaceable = 0
     if replace_instances:
         instances = replace_instances
-    for k in props['instance_facts'].keys():
-        if k in instances:
-            if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']:
-                replaceable += 1
-    if replaceable == 0:
+
+    # check to see if instances are replaceable if checking launch configs
+    new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
+    num_new_inst_needed = desired_capacity - len(new_instances)
+
+    if lc_check:
+        if num_new_inst_needed == 0 and old_instances:
+            log.debug("No new instances needed, but old instances are present. Removing old instances")
+            terminate_batch(connection, module, old_instances, instances, True)
+            as_group = connection.get_all_groups(names=[group_name])[0]
+            props = get_properties(as_group)
+            changed = True
+            return(changed, props)
+
+        # we don't want to spin up extra instances if not necessary
+        if num_new_inst_needed < batch_size:
+            log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
+            batch_size = num_new_inst_needed
+
+    if not old_instances:
         changed = False
         return(changed, props)
 
     # set temporary settings and wait for them to be reached
+    # This should get overriden if the number of instances left is less than the batch size.
     as_group = connection.get_all_groups(names=[group_name])[0]
-    as_group.max_size = max_size + batch_size
-    as_group.min_size = min_size + batch_size
-    as_group.desired_capacity = desired_capacity + batch_size
-    as_group.update()
-    wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
+    update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
+    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
     wait_for_elb(connection, module, group_name)
     as_group = connection.get_all_groups(names=[group_name])[0]
     props = get_properties(as_group)
     instances = props['instances']
     if replace_instances:
         instances = replace_instances
+
+    log.debug("beginning main loop")
     for i in get_chunks(instances, batch_size):
-        terminate_batch(connection, module, i)
-        wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
+        # break out of this loop if we have enough new instances
+        break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
+        wait_for_term_inst(connection, module, term_instances)
+        wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
         wait_for_elb(connection, module, group_name)
         as_group = connection.get_all_groups(names=[group_name])[0]
-    # return settings to normal
-    as_group.max_size = max_size
-    as_group.min_size = min_size
-    as_group.desired_capacity = desired_capacity
-    as_group.update()
+        if break_early:
+            log.debug("breaking loop")
+            break
+    update_size(as_group, max_size, min_size, desired_capacity)
     as_group = connection.get_all_groups(names=[group_name])[0]
     asg_properties = get_properties(as_group)
+    log.debug("Rolling update complete.")
     changed=True
     return(changed, asg_properties)
 
-def terminate_batch(connection, module, replace_instances):
-    group_name = module.params.get('name')
-    wait_timeout = int(module.params.get('wait_timeout'))
-    lc_check = module.params.get('lc_check')
-
-    as_group = connection.get_all_groups(names=[group_name])[0]
-    props = get_properties(as_group)
+def get_instances_by_lc(props, lc_check, initial_instances):
+
+    new_instances = []
+    old_instances = []
+    # old instances are those that have the old launch config
+    if lc_check:
+        for i in props['instances']:
+            if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+    else:
+        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
+        for i in props['instances']:
+            if i not in initial_instances:
+                new_instances.append(i)
+            else:
+                old_instances.append(i)
+    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
+    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
+
+    return new_instances, old_instances
+
+def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
+    instances_to_terminate = []
+    instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
 
     # check to make sure instances given are actually in the given ASG
     # and they have a non-current launch config
-    old_instances = []
-    instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
-
     if lc_check:
         for i in instances:
             if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
-                old_instances.append(i)
+                instances_to_terminate.append(i)
     else:
-        old_instances = instances
+        for i in instances:
+            if i in initial_instances:
+                instances_to_terminate.append(i)
+
+    return instances_to_terminate
 
-    # set all instances given to unhealthy
-    for instance_id in old_instances:
-        connection.set_instance_health(instance_id,'Unhealthy')
+def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
+    batch_size = module.params.get('replace_batch_size')
+    min_size = module.params.get('min_size')
+    desired_capacity = module.params.get('desired_capacity')
+    group_name = module.params.get('name')
+    wait_timeout = int(module.params.get('wait_timeout'))
+    lc_check = module.params.get('lc_check')
+    decrement_capacity = False
+    break_loop = False
+    as_group = connection.get_all_groups(names=[group_name])[0]
+    props = get_properties(as_group)
+    desired_size = as_group.min_size
+
+    new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
+    num_new_inst_needed = desired_capacity - len(new_instances)
+
+    # check to make sure instances given are actually in the given ASG
+    # and they have a non-current launch config
+    instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
+
+    log.debug("new instances needed: {0}".format(num_new_inst_needed))
+    log.debug("new instances: {0}".format(new_instances))
+    log.debug("old instances: {0}".format(old_instances))
+    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
+
+    if num_new_inst_needed == 0:
+        decrement_capacity = True
+        if as_group.min_size != min_size:
+            as_group.min_size = min_size
+            as_group.update()
+            log.debug("Updating minimum size back to original of {0}".format(min_size))
+        # if there are some leftover old instances, but we are already at capacity with new ones
+        # we don't want to decrement capacity
+        if leftovers:
+            decrement_capacity = False
+            break_loop = True
+            instances_to_terminate = old_instances
+            desired_size = min_size
+        log.debug("No new instances needed")
+
+    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
+        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
+        decrement_capacity = False
+        break_loop = False
+        log.debug("{0} new instances needed".format(num_new_inst_needed))
+
+    log.debug("decrementing capacity: {0}".format(decrement_capacity))
+
+    for instance_id in instances_to_terminate:
+        elb_dreg(connection, module, group_name, instance_id)
+        log.debug("terminating instance: {0}".format(instance_id))
+        connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
 
     # we wait to make sure the machines we marked as Unhealthy are
     # no longer in the list
+    return break_loop, desired_size, instances_to_terminate
+
+def wait_for_term_inst(connection, module, term_instances):
+
+    batch_size = module.params.get('replace_batch_size')
+    wait_timeout = module.params.get('wait_timeout')
+    group_name = module.params.get('name')
+    lc_check = module.params.get('lc_check')
+    as_group = connection.get_all_groups(names=[group_name])[0]
+    props = get_properties(as_group)
     count = 1
     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and count > 0:
+        log.debug("waiting for instances to terminate")
         count = 0
         as_group = connection.get_all_groups(names=[group_name])[0]
         props = get_properties(as_group)
         instance_facts = props['instance_facts']
-        instances = ( i for i in instance_facts if i in old_instances)
+        instances = ( i for i in instance_facts if i in term_instances)
         for i in instances:
-            if ( instance_facts[i]['lifecycle_state'] == 'Terminating'
-                 or instance_facts[i]['health_status'] == 'Unhealthy' ):
+            lifecycle = instance_facts[i]['lifecycle_state']
+            health = instance_facts[i]['health_status']
+            log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
+            if lifecycle == 'Terminating' or health == 'Unhealthy':
                 count += 1
         time.sleep(10)
@@ -570,21 +741,24 @@ def terminate_batch(connection, module, replace_instances):
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
 
-def wait_for_new_instances(module, connection, group_name, wait_timeout, desired_size, prop):
+def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
 
     # make sure we have the latest stats after that last loop.
     as_group = connection.get_all_groups(names=[group_name])[0]
     props = get_properties(as_group)
+    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
     # now we make sure that we have enough instances in a viable state
     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and desired_size > props[prop]:
+        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
        time.sleep(10)
         as_group = connection.get_all_groups(names=[group_name])[0]
         props = get_properties(as_group)
 
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
+    log.debug("Reached {0}: {1}".format(prop, desired_size))
     return props
def main(): def main():
@ -608,6 +782,7 @@ def main():
tags=dict(type='list', default=[]), tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300), health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True) wait_for_instances=dict(type='bool', default=True)
), ),
) )
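
The rolling-replace loop above feeds instance IDs through get_chunks in replace_batch_size batches. The generator is small enough to sketch standalone (the IDs are illustrative):

def get_chunks(l, n):
    # yields successive slices of at most n items (Python 2 xrange, as in the module)
    for i in xrange(0, len(l), n):
        yield l[i:i + n]

instance_ids = ['i-1111', 'i-2222', 'i-3333', 'i-4444', 'i-5555']
for batch in get_chunks(instance_ids, 2):
    # each batch goes to terminate_batch(), then the loop waits for
    # replacements to become viable before continuing
    print(batch)
# -> ['i-1111', 'i-2222'], then ['i-3333', 'i-4444'], then ['i-5555']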


@@ -37,25 +37,21 @@ options:
     version_added: "1.4"
   reuse_existing_ip_allowed:
     description:
-      - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
+      - Reuse an EIP that is not associated to an instance (when available),
+        instead of allocating a new one.
     required: false
     default: false
     version_added: "1.6"
-  wait_timeout:
-    description:
-      - how long to wait in seconds for newly provisioned EIPs to become available
-    default: 300
-    version_added: "1.7"
 extends_documentation_fragment: aws
-author: Lorin Hochstein <lorin@nimbisservices.com>
+author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
 notes:
   - This module will return C(public_ip) on success, which will contain the
     public IP address associated with the instance.
   - There may be a delay between the time the Elastic IP is assigned and when
-    the cloud instance is reachable via the new address. Use wait_for and pause
-    to delay further playbook execution until the instance is reachable, if
-    necessary.
+    the cloud instance is reachable via the new address. Use wait_for and
+    pause to delay further playbook execution until the instance is reachable,
+    if necessary.
 '''
 
 EXAMPLES = '''
@@ -78,7 +74,8 @@ EXAMPLES = '''
   ec2_eip: state='present'
 
 - name: provision new instances with ec2
-  ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3
+  ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes
+       group=webserver count=3
   register: ec2
 
 - name: associate new elastic IPs with each of the instances
   ec2_eip: "instance_id={{ item }}"
@@ -97,178 +94,165 @@ try:
 except ImportError:
     HAS_BOTO = False
 
-wait_timeout = 0
 
-def associate_ip_and_instance(ec2, address, instance_id, module):
-    if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
-        module.exit_json(changed=False, public_ip=address.public_ip)
+class EIPException(Exception):
+    pass
+
+
+def associate_ip_and_instance(ec2, address, instance_id, check_mode):
+    if address_is_associated_with_instance(ec2, address, instance_id):
+        return {'changed': False}
 
     # If we're in check mode, nothing else to do
-    if module.check_mode:
-        module.exit_json(changed=True)
-
-    try:
-        if address.domain == "vpc":
-            res = ec2.associate_address(instance_id, allocation_id=address.allocation_id)
-        else:
-            res = ec2.associate_address(instance_id, public_ip=address.public_ip)
-    except boto.exception.EC2ResponseError, e:
-        module.fail_json(msg=str(e))
-
-    if res:
-        module.exit_json(changed=True, public_ip=address.public_ip)
-    else:
-        module.fail_json(msg="association failed")
+    if not check_mode:
+        if address.domain == 'vpc':
+            res = ec2.associate_address(instance_id,
+                                        allocation_id=address.allocation_id)
+        else:
+            res = ec2.associate_address(instance_id,
+                                        public_ip=address.public_ip)
+        if not res:
+            raise EIPException('association failed')
+
+    return {'changed': True}
 
-def disassociate_ip_and_instance(ec2, address, instance_id, module):
-    if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
-        module.exit_json(changed=False, public_ip=address.public_ip)
+def disassociate_ip_and_instance(ec2, address, instance_id, check_mode):
+    if not address_is_associated_with_instance(ec2, address, instance_id):
+        return {'changed': False}
 
     # If we're in check mode, nothing else to do
-    if module.check_mode:
-        module.exit_json(changed=True)
-
-    try:
-        if address.domain == "vpc":
-            res = ec2.disassociate_address(association_id=address.association_id)
-        else:
-            res = ec2.disassociate_address(public_ip=address.public_ip)
-    except boto.exception.EC2ResponseError, e:
-        module.fail_json(msg=str(e))
-
-    if res:
-        module.exit_json(changed=True)
-    else:
-        module.fail_json(msg="disassociation failed")
+    if not check_mode:
+        if address.domain == 'vpc':
+            res = ec2.disassociate_address(
+                association_id=address.association_id)
+        else:
+            res = ec2.disassociate_address(public_ip=address.public_ip)
+
+        if not res:
+            raise EIPException('disassociation failed')
+
+    return {'changed': True}
 
-def find_address(ec2, public_ip, module, fail_on_not_found=True):
+def _find_address_by_ip(ec2, public_ip):
+    try:
+        return ec2.get_all_addresses([public_ip])[0]
+    except boto.exception.EC2ResponseError as e:
+        if "Address '{}' not found.".format(public_ip) not in e.message:
+            raise
+
+
+def _find_address_by_instance_id(ec2, instance_id):
+    addresses = ec2.get_all_addresses(None, {'instance-id': instance_id})
+    if addresses:
+        return addresses[0]
+
+
+def find_address(ec2, public_ip, instance_id):
     """ Find an existing Elastic IP address """
-    if wait_timeout != 0:
-        timeout = time.time() + wait_timeout
-        while timeout > time.time():
-            try:
-                addresses = ec2.get_all_addresses([public_ip])
-                break
-            except boto.exception.EC2ResponseError, e:
-                if "Address '%s' not found." % public_ip in e.message:
-                    if not fail_on_not_found:
-                        return None
-                else:
-                    module.fail_json(msg=str(e.message))
-            time.sleep(5)
-        if timeout <= time.time():
-            module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime())
-    else:
-        try:
-            addresses = ec2.get_all_addresses([public_ip])
-        except boto.exception.EC2ResponseError, e:
-            if "Address '%s' not found." % public_ip in e.message:
-                if not fail_on_not_found:
-                    return None
-                module.fail_json(msg=str(e.message))
-    return addresses[0]
+    if public_ip:
+        return _find_address_by_ip(ec2, public_ip)
+    elif instance_id:
+        return _find_address_by_instance_id(ec2, instance_id)
 
-def ip_is_associated_with_instance(ec2, public_ip, instance_id, module):
+def address_is_associated_with_instance(ec2, address, instance_id):
     """ Check if the elastic IP is currently associated with the instance """
-    address = find_address(ec2, public_ip, module)
     if address:
-        return address.instance_id == instance_id
-    else:
-        return False
-
-def instance_is_associated(ec2, instance, module):
-    """
-    Check if the given instance object is already associated with an
-    elastic IP
-    """
-    instance_ip = instance.ip_address
-    if not instance_ip:
-        return False
-    eip = find_address(ec2, instance_ip, module, fail_on_not_found=False)
-    return (eip and (eip.public_ip == instance_ip))
+        return address and address.instance_id == instance_id
+    return False
def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """ """ Allocate a new elastic IP address (when needed) and return it """
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(change=True)
if reuse_existing_ip_allowed: if reuse_existing_ip_allowed:
if domain: domain_filter = {'domain': domain or 'standard'}
domain_filter = { 'domain' : domain } all_addresses = ec2.get_all_addresses(filters=domain_filter)
else:
domain_filter = { 'domain' : 'standard' }
all_addresses = ec2.get_all_addresses(filters=domain_filter)
unassociated_addresses = filter(lambda a: not a.instance_id, all_addresses) unassociated_addresses = [a for a in all_addresses
if unassociated_addresses: if not a.instance_id]
address = unassociated_addresses[0]; if unassociated_addresses:
else: return unassociated_addresses[0]
address = ec2.allocate_address(domain=domain)
else:
address = ec2.allocate_address(domain=domain)
return address return ec2.allocate_address(domain=domain)
def release_address(ec2, public_ip, module): def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """ """ Release a previously allocated elastic IP address """
address = find_address(ec2, public_ip, module)
# If we're in check mode, nothing else to do # If we're in check mode, nothing else to do
if module.check_mode: if not check_mode:
module.exit_json(change=True) if not address.release():
EIPException('release failed')
res = address.release() return {'changed': True}
if res:
module.exit_json(changed=True)
else:
module.fail_json(msg="release failed")
def find_instance(ec2, instance_id, module): def find_instance(ec2, instance_id):
""" Attempt to find the EC2 instance and return it """ """ Attempt to find the EC2 instance and return it """
try: reservations = ec2.get_all_reservations(instance_ids=[instance_id])
reservations = ec2.get_all_reservations(instance_ids=[instance_id])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if len(reservations) == 1: if len(reservations) == 1:
instances = reservations[0].instances instances = reservations[0].instances
if len(instances) == 1: if len(instances) == 1:
return instances[0] return instances[0]
module.fail_json(msg="could not find instance" + instance_id) raise EIPException("could not find instance" + instance_id)
def allocate_eip(ec2, eip_domain, module, reuse_existing_ip_allowed, new_eip_timeout): def ensure_present(ec2, domain, address, instance_id,
# Allocate a new elastic IP reuse_existing_ip_allowed, check_mode):
address = allocate_address(ec2, eip_domain, module, reuse_existing_ip_allowed) changed = False
# overriding the timeout since this is a a newly provisioned ip
global wait_timeout # Return the EIP object since we've been given a public IP
wait_timeout = new_eip_timeout if not address:
return address if check_mode:
return {'changed': True}
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
changed = True
if instance_id:
# Allocate an IP for instance since no public_ip was provided
instance = find_instance(ec2, instance_id)
if instance.vpc_id:
domain = 'vpc'
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_instance(ec2, address, instance_id,
check_mode)
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip}
def ensure_absent(ec2, domain, address, instance_id, check_mode):
if not address:
return {'changed': False}
# disassociating address from instance
if instance_id:
return disassociate_ip_and_instance(ec2, address, instance_id,
check_mode)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main(): def main():
argument_spec = ec2_argument_spec() argument_spec = ec2_argument_spec()
argument_spec.update(dict( argument_spec.update(dict(
instance_id = dict(required=False), instance_id=dict(required=False),
public_ip = dict(required=False, aliases= ['ip']), public_ip=dict(required=False, aliases=['ip']),
state = dict(required=False, default='present', state=dict(required=False, default='present',
choices=['present', 'absent']), choices=['present', 'absent']),
in_vpc = dict(required=False, type='bool', default=False), in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), reuse_existing_ip_allowed=dict(required=False, type='bool',
wait_timeout = dict(default=300), default=False),
) wait_timeout=dict(default=300),
) ))
module = AnsibleModule( module = AnsibleModule(
argument_spec=argument_spec, argument_spec=argument_spec,
@ -284,54 +268,27 @@ def main():
public_ip = module.params.get('public_ip') public_ip = module.params.get('public_ip')
state = module.params.get('state') state = module.params.get('state')
in_vpc = module.params.get('in_vpc') in_vpc = module.params.get('in_vpc')
domain = "vpc" if in_vpc else None domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
new_eip_timeout = int(module.params.get('wait_timeout'))
if state == 'present': try:
# If both instance_id and public_ip are not specified, allocate a new address = find_address(ec2, public_ip, instance_id)
# elastic IP, and exit.
if not instance_id and not public_ip:
address = allocate_eip(ec2, domain, module,
reuse_existing_ip_allowed, new_eip_timeout)
module.exit_json(changed=True, public_ip=address.public_ip)
# Return the EIP object since we've been given a public IP if state == 'present':
if public_ip: result = ensure_present(ec2, domain, address, instance_id,
address = find_address(ec2, public_ip, module) reuse_existing_ip_allowed,
module.check_mode)
if instance_id and not public_ip:
instance = find_instance(ec2, instance_id, module)
if instance.vpc_id:
domain = "vpc"
# Do nothing if the instance is already associated with an
# elastic IP.
if instance_is_associated(ec2, instance, module):
module.exit_json(changed=False, public_ip=instance.ip_address)
# If the instance is not already associated with an elastic IP,
# allocate a new one.
address = allocate_eip(
ec2, domain, module, reuse_existing_ip_allowed, new_eip_timeout)
# Associate address object (provided or allocated) with instance
associate_ip_and_instance(ec2, address, instance_id, module)
else:
#disassociating address from instance
if instance_id:
address = find_address(ec2, public_ip, module)
disassociate_ip_and_instance(ec2, address, instance_id, module)
#releasing address
else: else:
release_address(ec2, public_ip, module) result = ensure_absent(ec2, domain, address, instance_id, module.check_mode)
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__': if __name__ == '__main__':
main() main()
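The refactor above moves ec2_eip from exit-happy helpers to functions that return result dicts, leaving main() as the only caller of exit_json()/fail_json(). A minimal sketch of that flow, with the EC2 calls replaced by placeholders (the address value below is made up, not a real API object):

def ensure_present(address, check_mode):
    """Stand-in for the module's ensure_present(): returns a result dict."""
    if address is not None:
        return {'changed': False, 'public_ip': address}
    if check_mode:
        return {'changed': True}        # report the change without making it
    new_address = '203.0.113.10'        # stand-in for ec2.allocate_address()
    return {'changed': True, 'public_ip': new_address}

def ensure_absent(address, check_mode):
    if address is None:
        return {'changed': False}
    # stand-in for address.release() / disassociate when not in check mode
    return {'changed': True}

print(ensure_present(None, check_mode=True))             # {'changed': True}
print(ensure_absent('203.0.113.10', check_mode=False))   # {'changed': True}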

View file

@@ -25,7 +25,7 @@ description:
     if state=absent is passed as an argument.
   - Will be marked changed when called only if there are ELBs found to operate on.
 version_added: "1.2"
-author: John Jarvis
+author: "John Jarvis (@jarv)"
 options:
   state:
     description:
@@ -103,6 +103,7 @@ import time
 try:
     import boto
     import boto.ec2
+    import boto.ec2.autoscale
     import boto.ec2.elb
     from boto.regioninfo import RegionInfo
     HAS_BOTO = True
@@ -129,9 +130,9 @@ class ElbManager:
         for lb in self.lbs:
             initial_state = self._get_instance_health(lb)
             if initial_state is None:
-                # The instance isn't registered with this ELB so just
-                # return unchanged
-                return
+                # Instance isn't registered with this load
+                # balancer. Ignore it and try the next one.
+                continue
 
             lb.deregister_instances([self.instance_id])
@@ -254,6 +255,9 @@ class ElbManager:
            for elb lookup instead of returning what elbs
            are attached to self.instance_id"""
 
+        if not ec2_elbs:
+            ec2_elbs = self._get_auto_scaling_group_lbs()
+
         try:
             elb = connect_to_aws(boto.ec2.elb, self.region,
                                  **self.aws_connect_params)
@@ -272,6 +276,32 @@ class ElbManager:
                 lbs.append(lb)
         return lbs
 
+    def _get_auto_scaling_group_lbs(self):
+        """Returns a list of ELBs associated with self.instance_id
+           indirectly through its auto scaling group membership"""
+
+        try:
+            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+            self.module.fail_json(msg=str(e))
+
+        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
+        if len(asg_instances) > 1:
+            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
+
+        if not asg_instances:
+            asg_elbs = []
+        else:
+            asg_name = asg_instances[0].group_name
+            asgs = asg.get_all_groups([asg_name])
+            if len(asg_instances) != 1:
+                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
+
+            asg_elbs = asgs[0].load_balancers
+
+        return asg_elbs
+
     def _get_instance(self):
         """Returns a boto.ec2.InstanceObject for self.instance_id"""
         try:
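The new _get_auto_scaling_group_lbs() above resolves load balancers indirectly: instance id, to auto scaling group name, to the group's load_balancers attribute. A sketch of that lookup chain with the boto autoscaling calls replaced by fixtures (all ids and names below are invented):

# Fixtures standing in for boto autoscaling calls; all values are invented.
ASG_INSTANCES = {'i-0123456789abcdef0': 'web-asg'}            # instance -> ASG name
ASG_LOAD_BALANCERS = {'web-asg': ['web-elb-a', 'web-elb-b']}  # ASG -> ELB names

def get_auto_scaling_group_lbs(instance_id):
    asg_name = ASG_INSTANCES.get(instance_id)
    if asg_name is None:
        return []  # instance is in no auto scaling group: nothing to add
    return ASG_LOAD_BALANCERS.get(asg_name, [])

print(get_auto_scaling_group_lbs('i-0123456789abcdef0'))  # ['web-elb-a', 'web-elb-b']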

View file

@@ -22,7 +22,9 @@ description:
   - Will be marked changed when called only if state is changed.
 short_description: Creates or destroys Amazon ELB.
 version_added: "1.5"
-author: Jim Dalton
+author:
+  - "Jim Dalton (@jsdalton)"
+  - "Rick Mendes (@rickmendes)"
 options:
   state:
     description:
@@ -56,6 +58,12 @@ options:
     require: false
     default: None
     version_added: "1.6"
+  security_group_names:
+    description:
+      - A list of security group names to apply to the elb
+    require: false
+    default: None
+    version_added: "2.0"
   health_check:
     description:
       - An associative array of health check configuration settings (see example)
@@ -361,7 +369,8 @@ class ElbManager(object):
         if not check_elb:
             info = {
                 'name': self.name,
-                'status': self.status
+                'status': self.status,
+                'region': self.region
             }
         else:
             try:
@@ -384,9 +393,34 @@ class ElbManager(object):
                 'hosted_zone_name': check_elb.canonical_hosted_zone_name,
                 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
                 'lb_cookie_policy': lb_cookie_policy,
-                'app_cookie_policy': app_cookie_policy
+                'app_cookie_policy': app_cookie_policy,
+                'instances': [instance.id for instance in check_elb.instances],
+                'out_of_service_count': 0,
+                'in_service_count': 0,
+                'unknown_instance_state_count': 0,
+                'region': self.region
             }
 
+            # status of instances behind the ELB
+            if info['instances']:
+                info['instance_health'] = [ dict(
+                    instance_id = instance_state.instance_id,
+                    reason_code = instance_state.reason_code,
+                    state = instance_state.state
+                ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
+            else:
+                info['instance_health'] = []
+
+            # instance state counts: InService or OutOfService
+            if info['instance_health']:
+                for instance_state in info['instance_health']:
+                    if instance_state['state'] == "InService":
+                        info['in_service_count'] += 1
+                    elif instance_state['state'] == "OutOfService":
+                        info['out_of_service_count'] += 1
+                    else:
+                        info['unknown_instance_state_count'] += 1
+
             if check_elb.health_check:
                 info['health_check'] = {
                     'target': check_elb.health_check.target,
@@ -792,6 +826,7 @@ def main():
         zones={'default': None, 'required': False, 'type': 'list'},
         purge_zones={'default': False, 'required': False, 'type': 'bool'},
         security_group_ids={'default': None, 'required': False, 'type': 'list'},
+        security_group_names={'default': None, 'required': False, 'type': 'list'},
         health_check={'default': None, 'required': False, 'type': 'dict'},
         subnets={'default': None, 'required': False, 'type': 'list'},
         purge_subnets={'default': False, 'required': False, 'type': 'bool'},
@@ -804,6 +839,7 @@ def main():
     module = AnsibleModule(
         argument_spec=argument_spec,
+        mutually_exclusive = [['security_group_ids', 'security_group_names']]
     )
 
     if not HAS_BOTO:
@@ -820,6 +856,7 @@ def main():
     zones = module.params['zones']
     purge_zones = module.params['purge_zones']
     security_group_ids = module.params['security_group_ids']
+    security_group_names = module.params['security_group_names']
     health_check = module.params['health_check']
     subnets = module.params['subnets']
     purge_subnets = module.params['purge_subnets']
@@ -834,6 +871,21 @@ def main():
     if state == 'present' and not (zones or subnets):
         module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
 
+    if security_group_names:
+        security_group_ids = []
+        try:
+            ec2 = ec2_connect(module)
+            grp_details = ec2.get_all_security_groups()
+
+            for group_name in security_group_names:
+                if isinstance(group_name, basestring):
+                    group_name = [group_name]
+
+                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
+                security_group_ids.extend(group_id)
+        except boto.exception.NoAuthHandlerFound, e:
+            module.fail_json(msg = str(e))
+
     elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                          purge_zones, security_group_ids, health_check,
                          subnets, purge_subnets, scheme,
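The new security_group_names handling above maps each requested name (a string or a list of strings) onto ids from a single get_all_security_groups() call. A sketch of the same resolution against a made-up fixture instead of an EC2 connection:

# Fixture standing in for ec2.get_all_security_groups(); ids/names invented.
GROUPS = {'sg-11111111': 'web', 'sg-22222222': 'db'}  # id -> name

def resolve_security_group_ids(security_group_names):
    ids = []
    for group_name in security_group_names:
        if isinstance(group_name, str):   # the module uses basestring (Python 2)
            group_name = [group_name]
        ids.extend(gid for gid, name in GROUPS.items() if name in group_name)
    return ids

print(resolve_security_group_ids(['web', ['db']]))  # ['sg-11111111', 'sg-22222222']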

View file

@@ -36,7 +36,7 @@ description:
     The module must be called from within the EC2 instance itself.
 notes:
   - Parameters to filter on ec2_facts may be added later.
-author: "Silviu Dicu <silviudicu@gmail.com>"
+author: "Silviu Dicu (@silviud) <silviudicu@gmail.com>"
 '''
 
 EXAMPLES = '''

View file

@@ -5,6 +5,7 @@
 DOCUMENTATION = '''
 ---
 module: ec2_group
+author: "Andrew de Quincey (@adq)"
 version_added: "1.3"
 short_description: maintain an ec2 VPC security group.
 description:
@@ -24,15 +25,11 @@ options:
     required: false
   rules:
     description:
-      - List of firewall inbound rules to enforce in this group (see'''
-''' example). If none are supplied, a default all-out rule is assumed.'''
-''' If an empty list is supplied, no inbound rules will be enabled.
+      - List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled.
     required: false
   rules_egress:
     description:
-      - List of firewall outbound rules to enforce in this group (see'''
-''' example). If none are supplied, a default all-out rule is assumed.'''
-''' If an empty list is supplied, no outbound rules will be enabled.
+      - List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
     required: false
     version_added: "1.6"
   region:
@@ -90,6 +87,14 @@ EXAMPLES = '''
         from_port: 22
         to_port: 22
         cidr_ip: 10.0.0.0/8
+      - proto: tcp
+        from_port: 443
+        to_port: 443
+        group_id: amazon-elb/sg-87654321/amazon-elb-sg
+      - proto: tcp
+        from_port: 3306
+        to_port: 3306
+        group_id: 123412341234/sg-87654321/exact-name-of-sg
       - proto: udp
         from_port: 10050
         to_port: 10050
@@ -113,6 +118,7 @@ EXAMPLES = '''
 try:
     import boto.ec2
+    from boto.ec2.securitygroup import SecurityGroup
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False
@@ -122,6 +128,11 @@ def make_rule_key(prefix, rule, group_id, cidr_ip):
     """Creates a unique key for an individual group rule"""
     if isinstance(rule, dict):
         proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
+        #fix for 11177
+        if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
+            from_port = 'none'
+            to_port = 'none'
     else: # isinstance boto.ec2.securitygroup.IPPermissions
         proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
@@ -135,6 +146,22 @@ def addRulesToLookup(rules, prefix, dict):
             dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
 
+
+def validate_rule(module, rule):
+    VALID_PARAMS = ('cidr_ip',
+                    'group_id', 'group_name', 'group_desc',
+                    'proto', 'from_port', 'to_port')
+    for k in rule:
+        if k not in VALID_PARAMS:
+            module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
+
+    if 'group_id' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+    elif 'group_name' in rule and 'cidr_ip' in rule:
+        module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+    elif 'group_id' in rule and 'group_name' in rule:
+        module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
 def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
     """
     Returns tuple of (group_id, ip) after validating rule params.
@@ -148,6 +175,7 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
     group_id or a non-None ip range.
     """
+    FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
     group_id = None
     group_name = None
     ip = None
@@ -158,6 +186,12 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
         module.fail_json(msg="Specify group_name OR cidr_ip, not both")
     elif 'group_id' in rule and 'group_name' in rule:
         module.fail_json(msg="Specify group_id OR group_name, not both")
+    elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+        group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id)
+        groups[group_id] = group_instance
+        groups[group_name] = group_instance
     elif 'group_id' in rule:
         group_id = rule['group_id']
     elif 'group_name' in rule:
@@ -291,6 +325,8 @@ def main():
     # Now, go through all provided rules and ensure they are there.
     if rules is not None:
         for rule in rules:
+            validate_rule(module, rule)
+
             group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
             if target_group_created:
                 changed = True
@@ -319,6 +355,11 @@ def main():
             for (rule, grant) in groupRules.itervalues() :
                 grantGroup = None
                 if grant.group_id:
+                    if grant.owner_id != group.owner_id:
+                        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+                        group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id)
+                        groups[grant.group_id] = group_instance
+                        groups[grant.name] = group_instance
                     grantGroup = groups[grant.group_id]
                 if not module.check_mode:
                     group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
@@ -331,6 +372,8 @@ def main():
     # Now, go through all provided rules and ensure they are there.
     if rules_egress is not None:
         for rule in rules_egress:
+            validate_rule(module, rule)
+
             group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
             if target_group_created:
                 changed = True
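The FOREIGN_SECURITY_GROUP_REGEX introduced above accepts references of the form owner_id/group_id/group_name, as used in the new EXAMPLES entries. A standalone sketch of the parse (the reference value is taken from the example above):

import re

# Same pattern as in get_target_from_rule() above.
FOREIGN_SECURITY_GROUP_REGEX = r'^(\S+)/(sg-\S+)/(\S+)'

reference = '123412341234/sg-87654321/exact-name-of-sg'
match = re.match(FOREIGN_SECURITY_GROUP_REGEX, reference)
if match:
    owner_id, group_id, group_name = match.groups()
    print(owner_id, group_id, group_name)
    # 123412341234 sg-87654321 exact-name-of-sg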

View file

@@ -46,7 +46,7 @@ options:
     version_added: "1.6"
 
 extends_documentation_fragment: aws
-author: Vincent Viallet
+author: "Vincent Viallet (@zbal)"
 '''
 
 EXAMPLES = '''
@@ -127,25 +127,23 @@ def main():
     if state == 'absent':
         if key:
             '''found a match, delete it'''
-            try:
-                key.delete()
-                if wait:
-                    start = time.time()
-                    action_complete = False
-                    while (time.time() - start) < wait_timeout:
-                        if not ec2.get_key_pair(name):
-                            action_complete = True
-                            break
-                        time.sleep(1)
-                    if not action_complete:
-                        module.fail_json(msg="timed out while waiting for the key to be removed")
-            except Exception, e:
-                module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
-            else:
-                key = None
-                changed = True
-        else:
-            '''no match found, no changes required'''
+            if not module.check_mode:
+                try:
+                    key.delete()
+                    if wait:
+                        start = time.time()
+                        action_complete = False
+                        while (time.time() - start) < wait_timeout:
+                            if not ec2.get_key_pair(name):
+                                action_complete = True
+                                break
+                            time.sleep(1)
+                        if not action_complete:
+                            module.fail_json(msg="timed out while waiting for the key to be removed")
+                except Exception, e:
+                    module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
+            key = None
+            changed = True
 
     # Ensure requested key is present
     elif state == 'present':
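The check-mode change above wraps the module's existing delete-and-wait loop: poll until the key pair is gone or the timeout expires. The same pattern generically, with the EC2 lookup replaced by a stub predicate (the timeout values are arbitrary):

import time

def wait_until_gone(still_exists, wait_timeout=300, poll_interval=1):
    """Poll until still_exists() is False; return False on timeout."""
    start = time.time()
    while (time.time() - start) < wait_timeout:
        if not still_exists():
            return True
        time.sleep(poll_interval)
    return False

# Stub predicate that reports the key pair gone on the third poll.
state = {'polls': 0}

def key_pair_still_exists():
    state['polls'] += 1
    return state['polls'] < 3

print(wait_until_gone(key_pair_still_exists, wait_timeout=10))  # True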

View file

@@ -26,7 +26,7 @@ notes:
     after it is changed will not modify the launch configuration on AWS. You must create a new config and assign
     it to the ASG instead."
 version_added: "1.6"
-author: Gareth Rushgrove
+author: "Gareth Rushgrove (@garethr)"
 options:
   state:
     description:
@@ -116,6 +116,18 @@ options:
     default: false
     aliases: []
     version_added: "1.8"
+  classic_link_vpc_id:
+    description:
+      - Id of ClassicLink enabled VPC
+    required: false
+    default: null
+    version_added: "2.0"
+  classic_link_vpc_security_groups:
+    description:
+      - A list of security group ids with which to associate the ClassicLink VPC instances.
+    required: false
+    default: null
+    version_added: "2.0"
 extends_documentation_fragment: aws
 """
@@ -126,6 +138,12 @@ EXAMPLES = '''
     key_name: default
     security_groups: ['group', 'group2' ]
     instance_type: t1.micro
+    volumes:
+    - device_name: /dev/sda1
+      volume_size: 100
+      device_type: io1
+      iops: 3000
+      delete_on_termination: true
 '''
@@ -178,6 +196,8 @@ def create_launch_config(connection, module):
     ramdisk_id = module.params.get('ramdisk_id')
     instance_profile_name = module.params.get('instance_profile_name')
     ebs_optimized = module.params.get('ebs_optimized')
+    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
+    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
 
     bdm = BlockDeviceMapping()
     if volumes:
@@ -200,10 +220,12 @@ def create_launch_config(connection, module):
         kernel_id=kernel_id,
         spot_price=spot_price,
         instance_monitoring=instance_monitoring,
-        associate_public_ip_address = assign_public_ip,
+        associate_public_ip_address=assign_public_ip,
         ramdisk_id=ramdisk_id,
         instance_profile_name=instance_profile_name,
         ebs_optimized=ebs_optimized,
+        classic_link_vpc_security_groups=classic_link_vpc_security_groups,
+        classic_link_vpc_id=classic_link_vpc_id,
     )
 
     launch_configs = connection.get_all_launch_configurations(names=[name])
@@ -219,7 +241,8 @@ def create_launch_config(connection, module):
     module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
                      image_id=result.image_id, arn=result.launch_configuration_arn,
-                     security_groups=result.security_groups, instance_type=instance_type)
+                     security_groups=result.security_groups, instance_type=result.instance_type,
+                     result=result)
 
 
 def delete_launch_config(connection, module):
@@ -251,7 +274,9 @@ def main():
         ebs_optimized=dict(default=False, type='bool'),
         associate_public_ip_address=dict(type='bool'),
         instance_monitoring=dict(default=False, type='bool'),
-        assign_public_ip=dict(type='bool')
+        assign_public_ip=dict(type='bool'),
+        classic_link_vpc_security_groups=dict(type='list'),
+        classic_link_vpc_id=dict(type='str')
         )
     )
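The new volumes example above is consumed by the module's existing BlockDeviceMapping handling. A sketch of how such a volume dict maps onto boto's block device classes; it assumes boto 2.x is installed, and the device values are copied from the example rather than from any real account:

# Assumes boto 2.x; values mirror the volumes example above, not a real account.
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

volumes = [{'device_name': '/dev/sda1',
            'volume_size': 100,
            'device_type': 'io1',
            'iops': 3000,
            'delete_on_termination': True}]

bdm = BlockDeviceMapping()
for volume in volumes:
    bdm[volume['device_name']] = BlockDeviceType(
        size=volume['volume_size'],
        volume_type=volume['device_type'],
        iops=volume.get('iops'),
        delete_on_termination=volume.get('delete_on_termination', False))

print(list(bdm.keys()))  # ['/dev/sda1']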

View file

@@ -21,7 +21,7 @@ description:
   - Can create or delete AWS metric alarms
   - Metrics you wish to alarm on must already exist
 version_added: "1.6"
-author: Zacharie Eakin
+author: "Zacharie Eakin (@zeekin)"
 options:
   state:
     description:

View file

@@ -7,7 +7,7 @@ description:
   - Can create or delete scaling policies for autoscaling groups
   - Referenced autoscaling groups must already exist
 version_added: "1.6"
-author: Zacharie Eakin
+author: "Zacharie Eakin (@zeekin)"
 options:
   state:
     description:

View file

@@ -75,7 +75,7 @@ options:
     required: false
     version_added: "1.9"
 
-author: Will Thames
+author: "Will Thames (@willthames)"
 extends_documentation_fragment: aws
 '''

View file

@@ -42,7 +42,7 @@ options:
     default: null
     aliases: ['aws_region', 'ec2_region']
 
-author: Lester Wade
+author: "Lester Wade (@lwade)"
 extends_documentation_fragment: aws
 '''

View file

@@ -107,7 +107,7 @@ options:
     default: present
     choices: ['absent', 'present', 'list']
     version_added: "1.6"
-author: Lester Wade
+author: "Lester Wade (@lwade)"
 extends_documentation_fragment: aws
 '''
@@ -160,8 +160,8 @@ EXAMPLES = '''
     instance: "{{ item.id }}"
     name: my_existing_volume_Name_tag
     device_name: /dev/xvdf
   with_items: ec2.instances
   register: ec2_vol
 
 # Remove a volume
 - ec2_vol:
@@ -239,15 +239,14 @@ def get_volumes(module, ec2):
     return vols
 
 def delete_volume(module, ec2):
-    vol = get_volume(module, ec2)
-    if not vol:
-        module.exit_json(changed=False)
-    else:
-        if vol.attachment_state() is not None:
-            adata = vol.attach_data
-            module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id))
-        ec2.delete_volume(vol.id)
-        module.exit_json(changed=True)
+    volume_id = module.params['id']
+    try:
+        ec2.delete_volume(volume_id)
+        module.exit_json(changed=True)
+    except boto.exception.EC2ResponseError as ec2_error:
+        if ec2_error.code == 'InvalidVolume.NotFound':
+            module.exit_json(changed=False)
+        module.fail_json(msg=ec2_error.message)
 
 def boto_supports_volume_encryption():
     """
@@ -437,11 +436,11 @@ def main():
     # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
     # without needing to pass an unused volume_size
-    if not volume_size and not (id or name):
-        module.fail_json(msg="You must specify an existing volume with id or name or a volume_size")
+    if not volume_size and not (id or name or snapshot):
+        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
 
-    if volume_size and id:
-        module.fail_json(msg="Cannot specify volume_size and id")
+    if volume_size and (id or snapshot):
+        module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
 
     if state == 'absent':
         delete_volume(module, ec2)
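The rewritten delete_volume() above treats a missing volume as already absent instead of an error. The same idempotent-delete pattern in isolation, with the boto error class replaced by a local stand-in:

class EC2ResponseError(Exception):
    """Local stand-in for boto.exception.EC2ResponseError."""
    code = 'InvalidVolume.NotFound'

def delete_volume(delete, volume_id):
    try:
        delete(volume_id)
        return {'changed': True}
    except EC2ResponseError as err:
        if err.code == 'InvalidVolume.NotFound':
            return {'changed': False}   # already gone: not an error
        raise

def already_deleted(volume_id):
    raise EC2ResponseError()

print(delete_volume(already_deleted, 'vol-0123abcd'))  # {'changed': False}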

View file

@@ -58,7 +58,7 @@ options:
     aliases: []
   resource_tags:
     description:
-    - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
+    - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
     required: true
     default: null
     aliases: []
@@ -72,7 +72,7 @@ options:
     aliases: []
   route_tables:
     description:
-    - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
+    - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
     required: false
     default: null
     aliases: []
@@ -100,7 +100,7 @@ options:
     required: true
     default: null
     aliases: ['aws_region', 'ec2_region']
-author: Carson Gee
+author: "Carson Gee (@carsongee)"
 extends_documentation_fragment: aws
 '''
@@ -499,6 +499,9 @@ def create_vpc(module, vpc_conn):
         for rt in route_tables:
             try:
                 new_rt = vpc_conn.create_route_table(vpc.id)
+                new_rt_tags = rt.get('resource_tags', None)
+                if new_rt_tags:
+                    vpc_conn.create_tags(new_rt.id, new_rt_tags)
                 for route in rt['routes']:
                     route_kwargs = {}
                     if route['gw'] == 'igw':

cloud/amazon/ec2_vpc_net.py Normal file
View file

@ -0,0 +1,295 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the Name of the VPC when the two differ.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False in regards to the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return false.
"""
matched_vpc = None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1:
matched_vpc = matching_vpcs[0]
elif len(matching_vpcs) > 1:
if not multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
if tags is None:
tags = dict()
tags.update({'Name': name})
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
if cmp(tags, current_tags):
vpc.create_tags(vpc_obj.id, tags)
return True
else:
return False
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
if vpc_obj.dhcp_options_id != dhcp_id:
connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
return True
else:
return False
def get_vpc_values(vpc_obj):
if vpc_obj is not None:
vpc_values = vpc_obj.__dict__
if "region" in vpc_values:
vpc_values.pop("region")
if "item" in vpc_values:
vpc_values.pop("item")
if "connection" in vpc_values:
vpc_values.pop("connection")
return vpc_values
else:
return None
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name = dict(type='str', default=None, required=True),
cidr_block = dict(type='str', default=None, required=True),
tenancy = dict(choices=['default', 'dedicated'], default='default'),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
dhcp_opts_id = dict(type='str', default=None, required=False),
tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
state = dict(choices=['present', 'absent'], default='present'),
multi_ok = dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if dns_hostnames and not dns_support:
module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)
# Note: boto doesn't currently provide an interface to ec2-describe-vpc-attribute,
# which is needed in order to detect the current status of the DNS options. For now
# we just update the attributes on every run and do not treat them as a change factor.
try:
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
elif state == 'absent':
# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)
vpc_obj = None
changed = True
except BotoServerError, e:
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
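get_vpc_values() above exists because boto hangs non-serializable attributes (connection, region, item) off the VPC object before it can be returned as module output. A standalone sketch of the same stripping against a fake VPC object (all values invented):

class FakeVpc(object):  # stand-in for boto's VPC object
    def __init__(self):
        self.id = 'vpc-0123abcd'
        self.cidr_block = '10.10.0.0/16'
        self.region = object()        # dropped by get_vpc_values
        self.connection = object()    # dropped by get_vpc_values

vpc_values = FakeVpc().__dict__.copy()
for key in ('region', 'item', 'connection'):
    vpc_values.pop(key, None)
print(vpc_values)  # {'id': 'vpc-0123abcd', 'cidr_block': '10.10.0.0/16'}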

View file

@@ -22,7 +22,7 @@ description:
   - Manage cache clusters in Amazon Elasticache.
   - Returns information about the specified cache cluster.
 version_added: "1.4"
-author: Jim Dalton
+author: "Jim Dalton (@jsdalton)"
 options:
   state:
     description:
@@ -42,7 +42,7 @@ options:
     description:
       - The version number of the cache engine
     required: false
-    default: 1.4.14
+    default: none
   node_type:
     description:
       - The compute and memory capacity of the nodes in the cache cluster
@@ -485,11 +485,11 @@ def main():
             state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
             name={'required': True},
             engine={'required': False, 'default': 'memcached'},
-            cache_engine_version={'required': False, 'default': '1.4.14'},
+            cache_engine_version={'required': False},
             node_type={'required': False, 'default': 'cache.m1.small'},
             num_nodes={'required': False, 'default': None, 'type': 'int'},
-            cache_port={'required': False, 'default': 11211, 'type': 'int'},
             parameter_group={'required': False, 'default': None},
+            cache_port={'required': False, 'type': 'int'},
             cache_subnet_group={'required': False, 'default': None},
             cache_security_groups={'required': False, 'default': [default],
                                    'type': 'list'},

View file

@ -0,0 +1,157 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage Elasticache subnet groups
description:
- Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
default: present
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
description:
description:
- Elasticache subnet group description. Only set when a new group is added.
required: false
default: null
subnets:
description:
- List of subnet IDs that make up the Elasticache subnet group.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
aliases: ['aws_region', 'ec2_region']
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Add or change a subnet group
- elasticache_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- elasticache_subnet_group:
state: absent
name: norwegian-blue
'''
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % region
connect_region = RegionInfo(name=region, endpoint=endpoint)
conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=e.message)
try:
changed = False
exists = False
try:
matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_cache_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError, e:
if e.error_message != 'No modifications were requested.':
module.fail_json(msg = e.error_message)
else:
changed = False
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
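The state-dependent parameter checks above (description and subnets required for present, forbidden for absent) are easy to exercise in isolation. A sketch of the same validation as a plain function, using made-up values from the EXAMPLES:

def validate_params(state, params):
    if state == 'present':
        missing = [p for p in ('name', 'description', 'subnets')
                   if not params.get(p)]
        if missing:
            raise ValueError("required for state='present': %s" % missing)
    else:
        extra = [p for p in ('description', 'subnets') if params.get(p)]
        if extra:
            raise ValueError("not allowed for state='absent': %s" % extra)

try:
    validate_params('absent', {'name': 'norwegian-blue',
                               'subnets': ['subnet-aaaaaaaa']})
except ValueError as err:
    print(err)  # not allowed for state='absent': ['subnets']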

cloud/amazon/iam.py Normal file
View file

@ -0,0 +1,714 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, groups, roles and access keys.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
name:
description:
- Name of IAM resource to create or identify
required: true
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
required: false
default: null
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
required: false
default: null
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
default: null
choices: [ "present", "absent", "update" ]
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match.
required: false
default: "/"
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
required: false
default: null
choices: [ "create", "remove", "active", "inactive"]
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
required: false
default: '1'
access_key_ids:
description:
- A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
required: false
default: null
password:
description:
- When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
required: false
default: null
update_password:
required: false
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
notes:
- 'Currently boto does not support the removal of Managed Policies; the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
with_items:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
with_items: new_groups.results
'''
import json
import itertools
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.\
create_access_key_result.\
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_user(module, iam, name):
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
del_meta = iam.delete_user(name).delete_user_response
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
try:
del_meta = iam.delete_user(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return del_meta, name, changed
else:
changed = True
return del_meta, name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=False, msg=str(err))
else:
if not updated:
name_change = True
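# update_login_profile fails for a user that has no login profile yet, so a
# failed update falls back to create_login_profile; only a second failure is
# treated as a real error.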
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(str(err))
if 'Password does not conform to the account password policy' in error_msg:
module.fail_json(changed=False, msg="Passsword doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
else:
try:
iam.delete_login_profile(name)
changed = True
except boto.exception.BotoServerError:
pass
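# Top up access keys until key_count exist. AWS itself caps a user at two
# access keys, so larger key_count values will fail server-side.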
if key_state == 'create':
try:
while key_count > key_qty:
new_key = iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key
key_qty += 1
changed = True
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(
access_key, key_state.capitalize(), user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
if key_state == 'remove':
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
""" Sets groups for a user, will purge groups not explictly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
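The group reconciliation above is just two set differences. As a minimal
standalone sketch (hypothetical helper name, no boto involved), the same
semantics are:

def diff_groups(current, desired):
    """Return (to_add, to_remove) so that current becomes exactly desired."""
    current, desired = frozenset(current), frozenset(desired)
    return sorted(desired - current), sorted(current - desired)

# diff_groups(['Mario'], ['Mario', 'Luigi']) -> (['Luigi'], [])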
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list):
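# An EC2 instance assumes a role via an instance profile; the API keeps the
# two objects separate, so the module mirrors every role with a same-named
# instance profile and links them together.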
changed = False
try:
if name not in role_list:
changed = True
iam.create_role(
name, path=path).create_role_response.create_role_result.role.role_name
if name not in prof_list:
iam.create_instance_profile(name, path=path)
iam.add_role_to_instance_profile(name, name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def delete_role(module, iam, name, role_list, prof_list):
changed = False
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
iam.delete_instance_profile(name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(
default=None, required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
key_ids = module.params.get('access_key_ids')
if key_state:
key_state = key_state.lower()
# key_ids must be read before this check, otherwise it is referenced unbound
if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
module.fail_json(changed=False, msg="At least one access key has to be defined in order"
" to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
if iam_type != 'user' and (module.params.get('access_key_state') is not None or
module.params.get('access_key_ids') is not None):
module.fail_json(msg="the IAM type must be user when IAM access keys "
"are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role cannot currently be updated; "
"please specify present or absent")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
result = {}
changed = False
orig_group_list = [gl['group_name'] for gl in iam.get_all_groups().
list_groups_result.
groups]
orig_user_list = [ul['user_name'] for ul in iam.get_all_users().
list_users_result.
users]
orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.
roles]
orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles().
list_instance_profiles_response.
list_instance_profiles_result.
instance_profiles]
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
orig_name = name
if name_change and new_name:
name = new_name
if groups:
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exist. No update made." % name)
elif state == 'absent':
if name in orig_user_list:
set_users_groups(module, iam, name, '')
del_meta, name, changed = delete_user(module, iam, name)
module.exit_json(
deletion_meta=del_meta, deleted_user=name, changed=changed)
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
iam=iam, name=name, new_name=new_name, new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
changed=changed, msg="Update failed. Group %s doesn't seem to exist!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list = create_role(
module, iam, name, path, orig_role_list, orig_prof_list)
elif state == 'absent':
changed, role_list = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

294
cloud/amazon/iam_cert.py Normal file
View file

@@ -0,0 +1,294 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
- Whether to create or delete the certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
'''
import json
import sys
try:
import boto
import boto.iam
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
# Fetch the certificate once instead of five times and pull the fields out.
cert = iam.get_server_certificate(name).get_server_certificate_result.\
server_certificate
opath = cert.server_certificate_metadata.path
ocert = cert.certificate_body
ocert_id = cert.server_certificate_metadata.server_certificate_id
upload_date = cert.server_certificate_metadata.upload_date
exp = cert.server_certificate_metadata.expiration
return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
except ValueError:
# i_name is not among the existing certificate names
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
module.fail_json(changed=False, msg='A cert with the name %s already exists and'
' has a different certificate body associated'
' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
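An informal summary of dup_check's outcomes (derived from the code above, not
an official contract):

# name/new_name exists with an identical body          -> treat as an update
# name/new_name exists with a different body           -> fail: cert names are unique
# body already uploaded under another name, dup_ok     -> proceed (duplicate allowed)
# body already uploaded under another name, not dup_ok -> fail, naming that cert
# otherwise                                            -> fresh upload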
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
key=cert=cert_chain=None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()

353
cloud/amazon/iam_policy.py Normal file
View file

@@ -0,0 +1,353 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
- Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
aliases: []
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
aliases: []
policy_name:
description:
- The name label for the policy to create or remove.
required: false
aliases: []
policy_document:
description:
- The path to the properly JSON-formatted policy file (mutually exclusive with C(policy_json))
required: false
aliases: []
policy_json:
description:
- A properly JSON-formatted policy as a string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false
aliases: []
state:
description:
- Whether to create or delete the IAM policy.
required: true
default: null
choices: [ "present", "absent"]
aliases: []
skip_duplicates:
description:
- By default the module looks for any policies that match the document you pass in; if there is a match it will not create a new policy object with the same rules. You can override this by specifying false, which allows two policy objects with different names but the same rules.
required: false
default: "true"
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
notes:
- 'Currently boto does not support the removal of Managed Policies; the module will not work for removing or adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a policy named 'Admin' and attach it to the group 'administrators'
tasks:
- name: Attach the Admin policy to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
tasks:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name: Attach the READ-ONLY policy to both new groups
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
with_items: new_groups.results
# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
with_items:
- user: s3_user
prefix: s3_user_prefix
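# A matching s3_policy.json.j2 might look like the following (a hypothetical
# sketch; no such template ships with the module):
# {
#   "Version": "2012-10-17",
#   "Statement": [
#     {
#       "Effect": "Allow",
#       "Action": ["s3:ListBucket", "s3:GetObject", "s3:PutObject"],
#       "Resource": "arn:aws:s3:::my-bucket/{{ item.prefix }}/*"
#     }
#   ]
# }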
'''
import json
import urllib
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def user_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
try:
current_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
for pol in current_policies:
# urllib is needed here because boto returns url-encoded policy documents
if urllib.unquote(iam.get_user_policy(name, pol).
get_user_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_user_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_user_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_user_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
try:
current_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
for pol in current_policies:
if urllib.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_role_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_role_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_role_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed,
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
msg=''
try:
current_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
for pol in current_policies:
if urllib.unquote(iam.get_group_policy(name, pol).
get_group_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_group_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_group_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_group_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed,
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies, msg
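user_action, role_action and group_action differ only in which boto calls they
dispatch to. A condensed sketch of the shared flow (hypothetical helper and
parameter names, error handling elided, not part of the module):

import urllib

def policy_action(get_fn, put_fn, delete_fn, list_fn,
                  name, policy_name, skip, pdoc, state):
    """Generic skeleton shared by the user/role/group variants."""
    changed = False
    current = list_fn(name)
    # boto hands back url-encoded policy documents, hence the unquote.
    match = any(urllib.unquote(get_fn(name, p)) == pdoc for p in current)
    if state == 'present' and (not skip or (policy_name not in current and not match)):
        put_fn(name, policy_name, pdoc)
        changed = True
    elif state == 'absent':
        delete_fn(name, policy_name)
        changed = True
    return changed, name, list_fn(name)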
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
state=dict(
default=None, required=True, choices=['present', 'absent']),
iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False),
policy_json=dict(type='str', default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False)
))
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
name = module.params.get('iam_name')
policy_name = module.params.get('policy_name')
skip = module.params.get('skip_duplicates')
if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')
if module.params.get('policy_document') is not None:
# Round-trip through json so the document is in canonical form when it is
# compared against the url-decoded documents IAM returns.
with open(module.params.get('policy_document'), 'r') as json_data:
pdoc = json.dumps(json.load(json_data))
elif module.params.get('policy_json') is not None:
try:
pdoc = json.dumps(json.loads(module.params.get('policy_json')))
except Exception as e:
module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json'))
else:
pdoc=None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = False
if iam_type == 'user':
changed, user_name, current_policies = user_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, user_name=name, policies=current_policies)
elif iam_type == 'role':
changed, role_name, current_policies = role_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, role_name=name, policies=current_policies)
elif iam_type == 'group':
changed, group_name, current_policies, msg = group_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

114
cloud/amazon/rds.py Executable file → Normal file
View file

@@ -26,82 +26,68 @@ options:
description:
- Specifies the action to take.
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
@@ -109,62 +95,52 @@ options:
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
@@ -176,18 +152,15 @@ options:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
@@ -201,50 +174,53 @@ options:
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
@@ -289,6 +265,13 @@ EXAMPLES = '''
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
'''
import sys
@@ -377,6 +360,13 @@ class RDSConnection:
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
@@ -461,6 +451,13 @@ class RDS2Connection:
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
@@ -624,6 +621,8 @@ def await_resource(conn, resource, status, module):
if resource.name is None:
module.fail_json(msg="Problem with instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
@@ -710,7 +709,10 @@ def delete_db_instance_or_snapshot(module, conn):
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
@@ -839,6 +841,31 @@ def snapshot_db_instance(module, conn):
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
@@ -910,6 +937,7 @@ def validate_parameters(required_vars, valid_vars, module):
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
@ -952,7 +980,7 @@ def validate_parameters(required_vars, valid_vars, module):
def main(): def main():
argument_spec = ec2_argument_spec() argument_spec = ec2_argument_spec()
argument_spec.update(dict( argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False), instance_name = dict(required=False),
source_instance = dict(required=False), source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
@ -984,6 +1012,7 @@ def main():
tags = dict(type='dict', required=False), tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False), publicly_accessible = dict(required=False),
character_set_name = dict(required=False), character_set_name = dict(required=False),
force_failover = dict(type='bool', required=False, default=False)
)
)
@@ -1002,6 +1031,7 @@ def main():
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}

View file

@@ -67,7 +67,7 @@ options:
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment: aws
'''

View file

@@ -53,7 +53,7 @@ options:
     required: true
     default: null
     aliases: ['aws_region', 'ec2_region']
-author: Scott Anderson
+author: "Scott Anderson (@tastychutney)"
 extends_documentation_fragment: aws
 '''
@@ -138,10 +138,14 @@ def main():
         else:
             if not exists:
                 new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
+                changed = True
             else:
-                changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
+                # Sort the subnet groups before we compare them
+                matching_groups[0].subnet_ids.sort()
+                group_subnets.sort()
+                if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ):
+                    changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
+                    changed = True
     except BotoServerError, e:
         module.fail_json(msg = e.error_message)
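The sort-then-compare logic above is what makes repeated runs idempotent: an unchanged group no longer triggers modify_db_subnet_group on every run. A sketch of a task this affects, with hypothetical names and subnet IDs:

    - rds_subnet_group:
        state: present
        name: app-db-subnets
        description: Subnets for the app database
        subnets:
          - subnet-aaaaaaaa
          - subnet-bbbbbbbb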

View file

@@ -35,6 +35,12 @@ options:
     required: true
     default: null
     aliases: []
+  hosted_zone_id:
+    description:
+      - The Hosted Zone ID of the DNS zone to modify
+    required: false
+    version_added: 2.0
+    default: null
   record:
     description:
       - The full DNS record to create or delete
@@ -93,7 +99,53 @@ options:
     required: false
     default: false
     version_added: "1.9"
-author: Bruce Pennypacker
+  identifier:
+    description:
+      - Weighted and latency-based resource record sets only. An identifier
+        that differentiates among multiple resource record sets that have the
+        same combination of DNS name and type.
+    required: false
+    default: null
+    version_added: "2.0"
+  weight:
+    description:
+      - Weighted resource record sets only. Among resource record sets that
+        have the same combination of DNS name and type, a value that
+        determines what portion of traffic for the current resource record set
+        is routed to the associated location.
+    required: false
+    default: null
+    version_added: "2.0"
+  region:
+    description:
+      - Latency-based resource record sets only. Among resource record sets
+        that have the same combination of DNS name and type, a value that
+        determines which region this should be associated with for the
+        latency-based routing.
+    required: false
+    default: null
+    version_added: "2.0"
+  health_check:
+    description:
+      - Health check to associate with this record
+    required: false
+    default: null
+    version_added: "2.0"
+  failover:
+    description:
+      - Failover resource record sets only. Whether this is the primary or
+        secondary resource record set.
+    required: false
+    default: null
+    version_added: "2.0"
+  vpc_id:
+    description:
+      - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
+      - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
+    required: false
+    default: null
+    version_added: "2.0"
+author: "Bruce Pennypacker (@bpennypacker)"
 extends_documentation_fragment: aws
 '''
@@ -156,6 +208,40 @@ EXAMPLES = '''
         alias=True
         alias_hosted_zone_id="{{ elb_zone_id }}"
 
+# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
+# that the entire parameter list must be quoted:
+- route53:
+      command: "create"
+      zone: "foo.com"
+      hosted_zone_id: "Z2AABBCCDDEEFF"
+      record: "localhost.foo.com"
+      type: "AAAA"
+      ttl: "7200"
+      value: "::1"
+
+# Use a routing policy to distribute traffic:
+- route53:
+      command: "create"
+      zone: "foo.com"
+      record: "www.foo.com"
+      type: "CNAME"
+      value: "host1.foo.com"
+      ttl: 30
+      # Routing policy
+      identifier: "host1@www"
+      weight: 100
+      health_check: "d994b780-3150-49fd-9205-356abdd42e75"
+
 '''
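The failover parameter added above has no example of its own; a sketch of a primary/secondary pair, assuming an existing health check ID and that Route 53 expects the literal strings PRIMARY and SECONDARY:

    - route53:
        command: create
        zone: foo.com
        record: db.foo.com
        type: A
        value: 203.0.113.10
        ttl: 30
        identifier: db-primary
        failover: PRIMARY
        health_check: d994b780-3150-49fd-9205-356abdd42e75

    - route53:
        command: create
        zone: foo.com
        record: db.foo.com
        type: A
        value: 203.0.113.11
        ttl: 30
        identifier: db-secondary
        failover: SECONDARY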
@@ -166,11 +252,33 @@ try:
     import boto.ec2
     from boto import route53
     from boto.route53 import Route53Connection
-    from boto.route53.record import ResourceRecordSets
+    from boto.route53.record import Record, ResourceRecordSets
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False
 
+def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id):
+    """Finds a zone by name or zone_id"""
+    for zone in conn.get_zones():
+        # only save this zone id if the private status of the zone matches
+        # the private_zone_in boolean specified in the params
+        private_zone = module.boolean(zone.config.get('PrivateZone', False))
+        if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id):
+            if want_vpc_id:
+                # NOTE: These details aren't available in other boto methods, hence the necessary
+                # extra API call
+                zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse']
+                # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
+                if isinstance(zone_details['VPCs'], dict):
+                    if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
+                        return zone
+                else: # Forward compatibility for when boto fixes that bug
+                    if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
+                        return zone
+            else:
+                return zone
+    return None
+
 def commit(changes, retry_interval):
     """Commit changes, but retry PriorRequestNotComplete errors."""
@@ -191,6 +299,7 @@ def main():
     argument_spec.update(dict(
             command = dict(choices=['get', 'create', 'delete'], required=True),
             zone = dict(required=True),
+            hosted_zone_id = dict(required=False, default=None),
             record = dict(required=True),
             ttl = dict(required=False, type='int', default=3600),
             type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
@@ -200,6 +309,12 @@ def main():
             overwrite = dict(required=False, type='bool'),
             retry_interval = dict(required=False, default=500),
             private_zone = dict(required=False, type='bool', default=False),
+            identifier = dict(required=False),
+            weight = dict(required=False, type='int'),
+            region = dict(required=False),
+            health_check = dict(required=False),
+            failover = dict(required=False),
+            vpc_id = dict(required=False),
         )
     )
     module = AnsibleModule(argument_spec=argument_spec)
@@ -209,6 +324,7 @@ def main():
     command_in = module.params.get('command')
     zone_in = module.params.get('zone').lower()
+    hosted_zone_id_in = module.params.get('hosted_zone_id')
     ttl_in = module.params.get('ttl')
     record_in = module.params.get('record').lower()
     type_in = module.params.get('type')
@@ -217,6 +333,12 @@ def main():
     alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
     retry_interval_in = module.params.get('retry_interval')
     private_zone_in = module.params.get('private_zone')
+    identifier_in = module.params.get('identifier')
+    weight_in = module.params.get('weight')
+    region_in = module.params.get('region')
+    health_check_in = module.params.get('health_check')
+    failover_in = module.params.get('failover')
+    vpc_id_in = module.params.get('vpc_id')
 
     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
 
@@ -224,7 +346,7 @@ def main():
     if type(value_in) is str:
         if value_in:
-            value_list = sorted(value_in.split(','))
+            value_list = sorted([s.strip() for s in value_in.split(',')])
     elif type(value_in) is list:
         value_list = sorted(value_in)
@@ -243,38 +365,45 @@ def main():
     elif not alias_hosted_zone_id_in:
         module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete")
 
+    if vpc_id_in and not private_zone_in:
+        module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter"
+                             " 'vpc_id'")
+
     # connect to the route53 endpoint
     try:
         conn = Route53Connection(**aws_connect_kwargs)
     except boto.exception.BotoServerError, e:
         module.fail_json(msg = e.error_message)
 
-    # Get all the existing hosted zones and save their ID's
-    zones = {}
-    results = conn.get_all_hosted_zones()
-    for r53zone in results['ListHostedZonesResponse']['HostedZones']:
-        # only save this zone id if the private status of the zone matches
-        # the private_zone_in boolean specified in the params
-        if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in:
-            zone_id = r53zone['Id'].replace('/hostedzone/', '')
-            zones[r53zone['Name']] = zone_id
+    # Find the named zone ID
+    zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)
 
     # Verify that the requested zone is already defined in Route53
-    if not zone_in in zones:
+    if zone is None:
         errmsg = "Zone %s does not exist in Route53" % zone_in
         module.fail_json(msg = errmsg)
 
     record = {}
 
     found_record = False
-    sets = conn.get_all_rrsets(zones[zone_in])
+    wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
+                         identifier=identifier_in, weight=weight_in, region=region_in,
+                         health_check=health_check_in, failover=failover_in)
+    for v in value_list:
+        if alias_in:
+            wanted_rset.set_alias(alias_hosted_zone_id_in, v)
+        else:
+            wanted_rset.add_value(v)
+
+    sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in)
+
     for rset in sets:
         # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
         # tripping of things like * and @.
         decoded_name = rset.name.replace(r'\052', '*')
         decoded_name = decoded_name.replace(r'\100', '@')
 
-        if rset.type == type_in and decoded_name.lower() == record_in.lower():
+        if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
             found_record = True
             record['zone'] = zone_in
             record['type'] = rset.type
@@ -282,6 +411,15 @@ def main():
             record['ttl'] = rset.ttl
             record['value'] = ','.join(sorted(rset.resource_records))
             record['values'] = sorted(rset.resource_records)
+            if hosted_zone_id_in:
+                record['hosted_zone_id'] = hosted_zone_id_in
+            record['identifier'] = rset.identifier
+            record['weight'] = rset.weight
+            record['region'] = rset.region
+            record['failover'] = rset.failover
+            record['health_check'] = rset.health_check
+            if hosted_zone_id_in:
+                record['hosted_zone_id'] = hosted_zone_id_in
             if rset.alias_dns_name:
                 record['alias'] = True
                 record['value'] = rset.alias_dns_name
@@ -291,35 +429,32 @@ def main():
                 record['alias'] = False
                 record['value'] = ','.join(sorted(rset.resource_records))
                 record['values'] = sorted(rset.resource_records)
-            if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create':
+            if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
                 module.exit_json(changed=False)
+            break
 
     if command_in == 'get':
-        module.exit_json(changed=False, set=record)
+        if type_in == 'NS':
+            ns = record['values']
+        else:
+            # Retrieve name servers associated to the zone.
+            ns = conn.get_zone(zone_in).get_nameservers()
+
+        module.exit_json(changed=False, set=record, nameservers=ns)
 
     if command_in == 'delete' and not found_record:
         module.exit_json(changed=False)
 
-    changes = ResourceRecordSets(conn, zones[zone_in])
+    changes = ResourceRecordSets(conn, zone.id)
 
-    if command_in == 'create' and found_record:
-        if not module.params['overwrite']:
-            module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
-        else:
-            change = changes.add_change("DELETE", record_in, type_in, record['ttl'])
-            for v in record['values']:
-                if record['alias']:
-                    change.set_alias(record['alias_hosted_zone_id'], v)
-                else:
-                    change.add_value(v)
-
     if command_in == 'create' or command_in == 'delete':
-        change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in)
-        for v in value_list:
-            if module.params['alias']:
-                change.set_alias(alias_hosted_zone_id_in, v)
-            else:
-                change.add_value(v)
+        if command_in == 'create' and found_record:
+            if not module.params['overwrite']:
+                module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
+            command = 'UPSERT'
+        else:
+            command = command_in.upper()
+        changes.add_change_record(command, wanted_rset)
 
     try:
         result = commit(changes, retry_interval_in)

View file

@@ -22,19 +22,107 @@ description:
     - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
 version_added: "1.1"
 options:
+  aws_access_key:
+    description:
+      - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: [ 'ec2_access_key', 'access_key' ]
+  aws_secret_key:
+    description:
+      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+    required: false
+    default: null
+    aliases: ['ec2_secret_key', 'secret_key']
   bucket:
     description:
       - Bucket name.
     required: true
     default: null
     aliases: []
+  dest:
+    description:
+      - The destination file path when downloading an object/key with a GET operation.
+    required: false
+    aliases: []
+    version_added: "1.3"
+  encrypt:
+    description:
+      - When set for PUT mode, asks for server-side encryption
+    required: false
+    default: no
+    version_added: "2.0"
+  expiration:
+    description:
+      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
+    required: false
+    default: 600
+    aliases: []
+  marker:
+    description:
+      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
+    required: false
+    default: null
+    version_added: "2.0"
+  max_keys:
+    description:
+      - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
+    required: false
+    default: 1000
+    version_added: "2.0"
+  metadata:
+    description:
+      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+    required: false
+    default: null
+    version_added: "1.6"
+  mode:
+    description:
+      - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), list (list keys, 2.0+), create (bucket), delete (bucket), and delobj (delete object).
+    required: true
+    default: null
+    aliases: []
   object:
     description:
       - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
     required: false
     default: null
+  prefix:
+    description:
+      - Limits the response to keys that begin with the specified prefix for list mode
+    required: false
+    default: null
+    version_added: "2.0"
+  version:
+    description:
+      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+    required: false
+    default: null
     aliases: []
-    version_added: "1.3"
+    version_added: "2.0"
+  overwrite:
+    description:
+      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+    required: false
+    default: true
+    version_added: "1.2"
+  region:
+    description:
+      - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
    required: false
+    default: null
+    version_added: "1.8"
+  retries:
+    description:
+      - On recoverable failure, how many times to retry before actually failing.
+    required: false
+    default: 0
+    version_added: "2.0"
+  s3_url:
+    description:
+      - S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
+    default: null
+    aliases: [ S3_URL ]
   src:
     description:
       - The source file path when performing a PUT operation.
@@ -42,48 +130,11 @@ options:
     required: false
     default: null
     aliases: []
     version_added: "1.3"
-  dest:
-    description:
-      - The destination file path when downloading an object/key with a GET operation.
-    required: false
-    aliases: []
-    version_added: "1.3"
-  overwrite:
-    description:
-      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Starting with (v2.0) the valid values for this parameter are (always, never, different) and boolean is still accepted for backward compatibility, If the value set to (different) the file would be uploaded/downloaded only if the checksums are different.
-    required: false
-    default: always
-    version_added: "1.2"
-  mode:
-    description:
-      - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket).
-    required: true
-    default: null
-    aliases: []
-  expiration:
-    description:
-      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
-    required: false
-    default: 600
-    aliases: []
-  s3_url:
-    description:
-      - "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS"
-    default: null
-    aliases: [ S3_URL ]
-  metadata:
-    description:
-      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
-    required: false
-    default: null
-    version_added: "1.6"
-  region:
-    description:
-      - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
-    required: false
-    default: null
-    version_added: "1.8"
-author: Lester Wade, Ralph Tice
+
+requirements: [ "boto" ]
+author:
+  - "Lester Wade (@lwade)"
+  - "Ralph Tice (@ralph-tice)"
 extends_documentation_fragment: aws
 '''
@@ -94,9 +145,18 @@ EXAMPLES = '''
 # Simple GET operation
 - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
 
+# Get a specific version of an object.
+- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
+
 # PUT/upload with metadata
 - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
 
+# List keys simple
+- s3: bucket=mybucket mode=list
+
+# List keys all options
+- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
+
 # Create an empty bucket
 - s3: bucket=mybucket mode=create
 
@@ -106,16 +166,20 @@ EXAMPLES = '''
 # Delete a bucket and all contents
 - s3: bucket=mybucket mode=delete
 
 # GET an object but don't download if the file checksums match
 - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
 
+# Delete an object from a bucket
+- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
 '''
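The new parameters compose; a sketch in the same inline style, reusing the version ID from the examples above and assuming retries only guards GETs against transient SSL timeouts (per download_s3file below):

    # GET a specific version, retrying up to three times on transient failures
    - s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get retries=3

    # PUT with server-side encryption
    - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put encrypt=yes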
 import os
 import urlparse
-import hashlib
+from ssl import SSLError
 
 try:
     import boto
+    import boto.ec2
     from boto.s3.connection import Location
     from boto.s3.connection import OrdinaryCallingFormat
     from boto.s3.connection import S3Connection
@@ -123,20 +187,23 @@ try:
 except ImportError:
     HAS_BOTO = False
 
-def key_check(module, s3, bucket, obj):
+def key_check(module, s3, bucket, obj, version=None):
     try:
         bucket = s3.lookup(bucket)
-        key_check = bucket.get_key(obj)
+        key_check = bucket.get_key(obj, version_id=version)
     except s3.provider.storage_response_error, e:
-        module.fail_json(msg= str(e))
+        if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
+            key_check = None
+        else:
+            module.fail_json(msg=str(e))
     if key_check:
         return True
     else:
         return False
 
-def keysum(module, s3, bucket, obj):
+def keysum(module, s3, bucket, obj, version=None):
     bucket = s3.lookup(bucket)
-    key_check = bucket.get_key(obj)
+    key_check = bucket.get_key(obj, version_id=version)
     if not key_check:
         return None
     md5_remote = key_check.etag[1:-1]
@@ -165,6 +232,19 @@ def create_bucket(module, s3, bucket, location=None):
     if bucket:
         return True
 
+def get_bucket(module, s3, bucket):
+    try:
+        return s3.lookup(bucket)
+    except s3.provider.storage_response_error, e:
+        module.fail_json(msg= str(e))
+
+def list_keys(module, bucket_object, prefix, marker, max_keys):
+    all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
+
+    keys = [x.key for x in all_keys]
+
+    module.exit_json(msg="LIST operation complete", s3_keys=keys)
+
 def delete_bucket(module, s3, bucket):
     try:
         bucket = s3.lookup(bucket)
@@ -207,7 +287,8 @@ def path_check(path):
     else:
         return False
 
-def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt):
     try:
         bucket = s3.lookup(bucket)
         key = bucket.new_key(obj)
@@ -215,25 +296,34 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
             for meta_key in metadata.keys():
                 key.set_metadata(meta_key, metadata[meta_key])
 
-        key.set_contents_from_filename(src)
+        key.set_contents_from_filename(src, encrypt_key=encrypt)
         url = key.generate_url(expiry)
         module.exit_json(msg="PUT operation complete", url=url, changed=True)
     except s3.provider.storage_copy_error, e:
         module.fail_json(msg= str(e))
 
-def download_s3file(module, s3, bucket, obj, dest):
-    try:
-        bucket = s3.lookup(bucket)
-        key = bucket.lookup(obj)
-        key.get_contents_to_filename(dest)
-        module.exit_json(msg="GET operation complete", changed=True)
-    except s3.provider.storage_copy_error, e:
-        module.fail_json(msg= str(e))
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+    # retries is the number of loops; range/xrange needs to be one
+    # more to get that count of loops.
+    bucket = s3.lookup(bucket)
+    key = bucket.get_key(obj, version_id=version)
+    for x in range(0, retries + 1):
+        try:
+            key.get_contents_to_filename(dest)
+            module.exit_json(msg="GET operation complete", changed=True)
+        except s3.provider.storage_copy_error, e:
+            module.fail_json(msg= str(e))
+        except SSLError as e:
+            # actually fail on last pass through the loop.
+            if x >= retries:
+                module.fail_json(msg="s3 download failed; %s" % e)
+            # otherwise, try again, this may be a transient timeout.
+            pass
 
-def download_s3str(module, s3, bucket, obj):
+def download_s3str(module, s3, bucket, obj, version=None):
     try:
         bucket = s3.lookup(bucket)
-        key = bucket.lookup(obj)
+        key = bucket.get_key(obj, version_id=version)
         contents = key.get_contents_as_string()
         module.exit_json(msg="GET operation complete", contents=contents, changed=True)
     except s3.provider.storage_copy_error, e:
@@ -265,18 +355,25 @@ def is_walrus(s3_url):
     else:
         return False
 
 def main():
     argument_spec = ec2_argument_spec()
     argument_spec.update(dict(
             bucket = dict(required=True),
-            object = dict(),
-            src = dict(),
             dest = dict(default=None),
-            mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
+            encrypt = dict(default=True, type='bool'),
             expiry = dict(default=600, aliases=['expiration']),
-            s3_url = dict(aliases=['S3_URL']),
-            overwrite = dict(aliases=['force'], default='always'),
+            marker = dict(default=None),
+            max_keys = dict(default=1000),
             metadata = dict(type='dict'),
+            mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+            object = dict(),
+            version = dict(default=None),
+            overwrite = dict(aliases=['force'], default='always'),
+            prefix = dict(default=None),
+            retries = dict(aliases=['retry'], type='int', default=0),
+            s3_url = dict(aliases=['S3_URL']),
+            src = dict(),
         ),
     )
     module = AnsibleModule(argument_spec=argument_spec)
@@ -285,15 +382,27 @@ def main():
         module.fail_json(msg='boto required for this module')
 
     bucket = module.params.get('bucket')
-    obj = module.params.get('object')
-    src = module.params.get('src')
+    encrypt = module.params.get('encrypt')
+    expiry = int(module.params['expiry'])
     if module.params.get('dest'):
         dest = os.path.expanduser(module.params.get('dest'))
-    mode = module.params.get('mode')
-    expiry = int(module.params['expiry'])
-    s3_url = module.params.get('s3_url')
-    overwrite = module.params.get('overwrite')
+    marker = module.params.get('marker')
+    max_keys = module.params.get('max_keys')
     metadata = module.params.get('metadata')
+    mode = module.params.get('mode')
+    obj = module.params.get('object')
+    version = module.params.get('version')
+    overwrite = module.params.get('overwrite')
+    prefix = module.params.get('prefix')
+    retries = module.params.get('retries')
+    s3_url = module.params.get('s3_url')
+    src = module.params.get('src')
+
+    if overwrite not in ['always', 'never', 'different']:
+        if module.boolean(overwrite):
+            overwrite = 'always'
+        else:
+            overwrite='never'
 
     if overwrite not in ['always', 'never', 'different']:
         if module.boolean(overwrite):
@@ -356,29 +465,33 @@ def main():
             module.fail_json(msg="Target bucket cannot be found", failed=True)
 
         # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
-        keyrtn = key_check(module, s3, bucket, obj)
+        keyrtn = key_check(module, s3, bucket, obj, version=version)
         if keyrtn is False:
-            module.fail_json(msg="Target key cannot be found", failed=True)
+            if version is not None:
+                module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
+            else:
+                module.fail_json(msg="Key %s does not exist."%obj, failed=True)
 
         # If the destination path doesn't exist, no need to md5um etag check, so just download.
         pathrtn = path_check(dest)
         if pathrtn is False:
-            download_s3file(module, s3, bucket, obj, dest)
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
 
         # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
         if pathrtn is True:
-            md5_remote = keysum(module, s3, bucket, obj)
-            md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
+            md5_remote = keysum(module, s3, bucket, obj, version=version)
+            md5_local = module.md5(dest)
             if md5_local == md5_remote:
                 sum_matches = True
                 if overwrite == 'always':
-                    download_s3file(module, s3, bucket, obj, dest)
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                 else:
                     module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
             else:
                 sum_matches = False
                 if overwrite in ('always', 'different'):
-                    download_s3file(module, s3, bucket, obj, dest)
+                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                 else:
                     module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
@@ -388,9 +501,7 @@ def main():
         # At this point explicitly define the overwrite condition.
         if sum_matches is True and pathrtn is True and overwrite == 'always':
-            download_s3file(module, s3, bucket, obj, dest)
-
-        # If sum does not match but the destination exists, we
+            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
 
     # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
     if mode == 'put':
@@ -412,30 +523,47 @@ def main():
         # Lets check key state. Does it exist and if it does, compute the etag md5sum.
         if bucketrtn is True and keyrtn is True:
             md5_remote = keysum(module, s3, bucket, obj)
-            md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
+            md5_local = module.md5(src)
 
             if md5_local == md5_remote:
                 sum_matches = True
                 if overwrite == 'always':
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                 else:
                     get_download_url(module, s3, bucket, obj, expiry, changed=False)
             else:
                 sum_matches = False
                 if overwrite in ('always', 'different'):
-                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
                 else:
                     module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
 
         # If neither exist (based on bucket existence), we can create both.
         if bucketrtn is False and pathrtn is True:
             create_bucket(module, s3, bucket, location)
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
 
         # If bucket exists but key doesn't, just upload.
         if bucketrtn is True and pathrtn is True and keyrtn is False:
-            upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
+            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
 
-    # Support for deleting an object if we have both params.
+    # Delete an object from a bucket, not the entire bucket
+    if mode == 'delobj':
+        if obj is None:
+            module.fail_json(msg="object parameter is required", failed=True);
+        if bucket:
+            bucketrtn = bucket_check(module, s3, bucket)
+            if bucketrtn is True:
+                deletertn = delete_key(module, s3, bucket, obj)
+                if deletertn is True:
+                    module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
+            else:
+                module.fail_json(msg="Bucket does not exist.", changed=False)
+        else:
+            module.fail_json(msg="Bucket parameter is required.", failed=True)
+
+    # Delete an entire bucket, including all objects in the bucket
     if mode == 'delete':
         if bucket:
             bucketrtn = bucket_check(module, s3, bucket)
@@ -448,6 +576,16 @@ def main():
         else:
             module.fail_json(msg="Bucket parameter is required.", failed=True)
 
+    # Support for listing a set of keys
+    if mode == 'list':
+        bucket_object = get_bucket(module, s3, bucket)
+
+        # If the bucket does not exist then bail out
+        if bucket_object is None:
+            module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
+
+        list_keys(module, bucket_object, prefix, marker, max_keys)
+
     # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
     # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
     if mode == 'create':
@@ -494,11 +632,14 @@ def main():
         if bucketrtn is False:
             module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
         else:
-            keyrtn = key_check(module, s3, bucket, obj)
+            keyrtn = key_check(module, s3, bucket, obj, version=version)
             if keyrtn is True:
-                download_s3str(module, s3, bucket, obj)
+                download_s3str(module, s3, bucket, obj, version=version)
             else:
-                module.fail_json(msg="Key %s does not exist."%obj, failed=True)
+                if version is not None:
+                    module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
+                else:
+                    module.fail_json(msg="Key %s does not exist."%obj, failed=True)
 
     module.exit_json(failed=False)

View file

@@ -53,7 +53,7 @@ options:
       default: null
   role_size:
     description:
-      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6)
+      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of type G and DS are not available in all regions (locations); make sure the size and type you select are available in your chosen location.
     required: false
     default: Small
   endpoints:
@@ -110,11 +110,39 @@ options:
     required: false
     default: 'present'
     aliases: []
+  reset_pass_atlogon:
+    description:
+      - Reset the admin password on first logon for windows hosts
+    required: false
+    default: "no"
+    version_added: "2.0"
+    choices: [ "yes", "no" ]
+  auto_updates:
+    description:
+      - Enable Auto Updates on Windows Machines
+    required: false
+    version_added: "2.0"
+    default: "no"
+    choices: [ "yes", "no" ]
+  enable_winrm:
+    description:
+      - Enable winrm on Windows Machines
+    required: false
+    version_added: "2.0"
+    default: "yes"
+    choices: [ "yes", "no" ]
+  os_type:
+    description:
+      - The type of OS that is getting provisioned
+    required: false
+    version_added: "2.0"
+    default: "linux"
+    choices: [ "windows", "linux" ]
+
 requirements:
     - "python >= 2.6"
     - "azure >= 0.7.1"
-author: John Whitbeck
+author: "John Whitbeck (@jwhitbeck)"
 '''
 
 EXAMPLES = '''
@@ -138,6 +166,29 @@ EXAMPLES = '''
       module: azure
       name: my-virtual-machine
      state: absent
+
+# Create a Windows machine
+- hosts: all
+  connection: local
+  tasks:
+    - local_action:
+        module: azure
+        name: "ben-Windows-23"
+        hostname: "win123"
+        os_type: windows
+        enable_winrm: yes
+        subscription_id: "{{ azure_sub_id }}"
+        management_cert_path: "{{ azure_cert_path }}"
+        role_size: Small
+        image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5'
+        location: 'East Asia'
+        password: "xxx"
+        storage_account: benooytes
+        user: admin
+        wait: yes
+        virtual_network_name: "{{ vnet_name }}"
 '''
 import base64
@@ -184,6 +235,14 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
                     'Standard_D12',
                     'Standard_D13',
                     'Standard_D14',
+                    'Standard_DS1',
+                    'Standard_DS2',
+                    'Standard_DS3',
+                    'Standard_DS4',
+                    'Standard_DS11',
+                    'Standard_DS12',
+                    'Standard_DS13',
+                    'Standard_DS14',
                     'Standard_G1',
                     'Standard_G2',
                     'Standard_G3',
@@ -196,7 +255,7 @@ try:
     from azure import WindowsAzureError, WindowsAzureMissingResourceError
     from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
                                          PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
-                                         ConfigurationSetInputEndpoint)
+                                         ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
     HAS_AZURE = True
 except ImportError:
     HAS_AZURE = False
@@ -264,6 +323,7 @@ def create_virtual_machine(module, azure):
         True if a new virtual machine and/or cloud service was created, false otherwise
     """
     name = module.params.get('name')
+    os_type = module.params.get('os_type')
     hostname = module.params.get('hostname') or name + ".cloudapp.net"
     endpoints = module.params.get('endpoints').split(',')
     ssh_cert_path = module.params.get('ssh_cert_path')
@@ -296,9 +356,20 @@ def create_virtual_machine(module, azure):
     except WindowsAzureMissingResourceError:
         # vm does not exist; create it
 
-        # Create linux configuration
-        disable_ssh_password_authentication = not password
-        linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
+        if os_type == 'linux':
+            # Create linux configuration
+            disable_ssh_password_authentication = not password
+            vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
+        else:
+            #Create Windows Config
+            vm_config = WindowsConfigurationSet(hostname, password, module.params.get('reset_pass_atlogon'),\
+                                                module.params.get('auto_updates'), None, user)
+            vm_config.domain_join = None
+            if module.params.get('enable_winrm'):
+                listener = Listener('Http')
+                vm_config.win_rm.listeners.listeners.append(listener)
+            else:
+                vm_config.win_rm = None
 
         # Add ssh certificates if specified
         if ssh_cert_path:
@@ -313,7 +384,7 @@ def create_virtual_machine(module, azure):
             authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
             ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
             # Append ssh config to linux machine config
-            linux_config.ssh = ssh_config
+            vm_config.ssh = ssh_config
 
         # Create network configuration
         network_config = ConfigurationSetInputEndpoints()
@@ -340,7 +411,7 @@ def create_virtual_machine(module, azure):
                                                           deployment_slot='production',
                                                           label=name,
                                                           role_name=name,
-                                                          system_config=linux_config,
+                                                          system_config=vm_config,
                                                           network_config=network_config,
                                                           os_virtual_hard_disk=os_hd,
                                                           role_size=role_size,
@@ -448,6 +519,7 @@ def main():
             ssh_cert_path=dict(),
             name=dict(),
             hostname=dict(),
+            os_type=dict(default='linux', choices=['linux', 'windows']),
             location=dict(choices=AZURE_LOCATIONS),
             role_size=dict(choices=AZURE_ROLE_SIZES),
             subscription_id=dict(no_log=True),
@@ -461,7 +533,10 @@ def main():
             state=dict(default='present'),
             wait=dict(type='bool', default=False),
            wait_timeout=dict(default=600),
-            wait_timeout_redirects=dict(default=300)
+            wait_timeout_redirects=dict(default=300),
+            reset_pass_atlogon=dict(type='bool', default=False),
+            auto_updates=dict(type='bool', default=False),
+            enable_winrm=dict(type='bool', default=True),
         )
     )
     if not HAS_AZURE:
@@ -492,8 +567,8 @@ def main():
             module.fail_json(msg='location parameter is required for new instance')
         if not module.params.get('storage_account'):
             module.fail_json(msg='storage_account parameter is required for new instance')
-        if not module.params.get('password'):
-            module.fail_json(msg='password parameter is required for new instance')
+        if not (module.params.get('password') or module.params.get('ssh_cert_path')):
+            module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
         (changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
         module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
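With the relaxed check, a Linux VM can now be created with only an SSH certificate and no password. A sketch; the image name and storage account are placeholders:

    - azure:
        name: my-ssh-only-vm
        location: 'East US'
        role_size: Small
        image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS
        storage_account: mystorageaccount
        user: ubuntu
        ssh_cert_path: /path/to/azure_x509_cert.pem
        subscription_id: "{{ azure_sub_id }}"
        management_cert_path: "{{ azure_cert_path }}"
        wait: yes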

View file

@@ -22,6 +22,7 @@ short_description: Create/delete a droplet/SSH_key in DigitalOcean
 description:
      - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
 version_added: "1.3"
+author: "Vincent Viallet (@zbal)"
 options:
   command:
     description:

View file

@@ -22,6 +22,7 @@ short_description: Create/delete a DNS record in DigitalOcean
 description:
      - Create/delete a DNS record in DigitalOcean.
 version_added: "1.6"
+author: "Michael Gregson (@mgregson)"
 options:
   state:
     description:

View file

@@ -22,6 +22,7 @@ short_description: Create/delete an SSH key in DigitalOcean
 description:
      - Create/delete an SSH key.
 version_added: "1.6"
+author: "Michael Gregson (@mgregson)"
 options:
   state:
     description:

View file

@@ -59,10 +59,10 @@ options:
     version_added: "1.5"
   ports:
     description:
-      - List containing private to public port mapping specification. Use docker
-      - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
-      - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is
-      - a host interface.
+      - "List containing private to public port mapping specification.
+        Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
+        where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is
+        a host interface. The container ports need to be exposed either in the Dockerfile or via the C(expose) option."
     default: null
     version_added: "1.5"
   expose:
@@ -92,6 +92,23 @@ options:
      - 'alias. Use docker CLI-style syntax: C(redis:myredis).'
     default: null
     version_added: "1.5"
+  log_driver:
+    description:
+      - You can specify a different logging driver for the container than for the daemon.
+        "json-file" Default logging driver for Docker. Writes JSON messages to file.
+        docker logs command is available only for this logging driver.
+        "none" disables any logging for the container. docker logs won't be available with this driver.
+        "syslog" Syslog logging driver for Docker. Writes log messages to syslog.
+        docker logs command is not available for this logging driver.
+        If not defined explicitly, the Docker daemon's default ("json-file") will apply.
+        Requires docker >= 1.6.0.
+    required: false
+    default: json-file
+    choices:
+      - json-file
+      - none
+      - syslog
+    version_added: "2.0"
   memory_limit:
     description:
       - RAM allocated to the container as a number of bytes or as a human-readable
@@ -143,6 +160,12 @@ options:
         specified by docker-py.
     default: docker-py default remote API version
     version_added: "1.8"
+  docker_user:
+    description:
+      - Username or UID to use within the container
+    required: false
+    default: null
+    version_added: "2.0"
   username:
     description:
       - Remote API username.
@@ -174,8 +197,16 @@ options:
     default: null
   detach:
     description:
-      - Enable detached mode to leave the container running in background.
+      - Enable detached mode to leave the container running in background. If
+        disabled, fail unless the process exits cleanly.
     default: true
+  signal:
+    version_added: "2.0"
+    description:
+      - With the state "killed", you can alter the signal sent to the
+        container.
+    required: false
+    default: KILL
   state:
     description:
       - Assert the container's desired state. "present" only asserts that the
@@ -234,6 +265,12 @@ options:
     default: DockerHub
     aliases: []
     version_added: "1.8"
+  read_only:
+    description:
+      - Mount the container's root filesystem as read only
+    default: false
+    aliases: []
+    version_added: "2.0"
   restart_policy:
     description:
       - Container restart policy.
@@ -255,8 +292,30 @@ options:
         docker-py >= 0.5.0.
     default: false
     version_added: "1.9"
+  cpu_set:
+    description:
+      - CPUs in which to allow execution. Requires docker-py >= 0.6.0.
+    required: false
+    default: null
+    version_added: "2.0"
+  cap_add:
+    description:
+      - Add capabilities for the container. Requires docker-py >= 0.5.0.
+    required: false
+    default: false
+    version_added: "2.0"
+  cap_drop:
+    description:
+      - Drop capabilities for the container. Requires docker-py >= 0.5.0.
+    required: false
+    default: false
+    aliases: []
+    version_added: "2.0"
 
-author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson
+author:
+    - "Cove Schneider (@cove)"
+    - "Joshua Conner (@joshuaconner)"
+    - "Pavel Antonov (@softzilla)"
+    - "Ash Wilson (@smashwilson)"
 requirements:
     - "python >= 2.6"
     - "docker-py >= 0.3.0"
@@ -367,6 +426,7 @@ from urlparse import urlparse
 try:
     import docker.client
     import docker.utils
+    import docker.errors
     from requests.exceptions import RequestException
 except ImportError:
     HAS_DOCKER_PY = False
@@ -506,6 +566,12 @@ class DockerManager(object):
             'restart_policy': ((0, 5, 0), '1.14'),
             'extra_hosts': ((0, 7, 0), '1.3.1'),
             'pid': ((1, 0, 0), '1.17'),
+            'log_driver': ((1, 2, 0), '1.18'),
+            'host_config': ((0, 7, 0), '1.15'),
+            'cpu_set': ((0, 6, 0), '1.14'),
+            'cap_add': ((0, 5, 0), '1.14'),
+            'cap_drop': ((0, 5, 0), '1.14'),
+            'read_only': ((1, 0, 0), '1.17'),
             # Clientside only
             'insecure_registry': ((0, 5, 0), '0.0')
@@ -517,24 +583,26 @@ class DockerManager(object):
         self.volumes = None
         if self.module.params.get('volumes'):
             self.binds = {}
-            self.volumes = {}
+            self.volumes = []
             vols = self.module.params.get('volumes')
             for vol in vols:
                 parts = vol.split(":")
+                # regular volume
+                if len(parts) == 1:
+                    self.volumes.append(parts[0])
                 # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
-                if len(parts) == 2:
-                    self.volumes[parts[1]] = {}
-                    self.binds[parts[0]] = parts[1]
-                # with bind mode
-                elif len(parts) == 3:
-                    if parts[2] not in ['ro', 'rw']:
-                        self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
-                    ro = parts[2] == 'ro'
-                    self.volumes[parts[1]] = {}
-                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro}
-                # docker mount (e.g. /www, mounts a docker volume /www on the container at the same location)
+                elif 2 <= len(parts) <= 3:
+                    # default to read-write
+                    ro = False
+                    # with supplied bind mode
+                    if len(parts) == 3:
+                        if parts[2] not in ['ro', 'rw']:
+                            self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
+                        else:
+                            ro = parts[2] == 'ro'
+                    self.binds[parts[0]] = {'bind': parts[1], 'ro': ro }
                 else:
-                    self.volumes[parts[0]] = {}
+                    self.module.fail_json(msg='volumes support 1 to 3 arguments')
 
         self.lxc_conf = None
         if self.module.params.get('lxc_conf'):
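The rewritten parser accepts one to three colon-separated parts per volume entry; a sketch of the three forms as they would appear in a task:

    - docker:
        name: data
        image: busybox
        volumes:
          - /www                     # one part: container-only volume
          - /mnt/logs:/var/log       # two parts: host bind mount, read-write by default
          - /etc/app:/etc/app:ro     # three parts: bind mount with an explicit ro/rw mode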
@@ -713,6 +781,53 @@ class DockerManager(object):
         else:
             return None
 
+    def get_start_params(self):
+        """
+        Create start params
+        """
+        params = {
+            'lxc_conf': self.lxc_conf,
+            'binds': self.binds,
+            'port_bindings': self.port_bindings,
+            'publish_all_ports': self.module.params.get('publish_all_ports'),
+            'privileged': self.module.params.get('privileged'),
+            'links': self.links,
+            'network_mode': self.module.params.get('net'),
+            'read_only': self.module.params.get('read_only'),
+        }
+
+        optionals = {}
+        for optional_param in ('dns', 'volumes_from', 'restart_policy',
+                'restart_policy_retry', 'pid'):
+            optionals[optional_param] = self.module.params.get(optional_param)
+
+        if optionals['dns'] is not None:
+            self.ensure_capability('dns')
+            params['dns'] = optionals['dns']
+
+        if optionals['volumes_from'] is not None:
+            self.ensure_capability('volumes_from')
+            params['volumes_from'] = optionals['volumes_from']
+
+        if optionals['restart_policy'] is not None:
+            self.ensure_capability('restart_policy')
+            params['restart_policy'] = { 'Name': optionals['restart_policy'] }
+            if params['restart_policy']['Name'] == 'on-failure':
+                params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
+
+        if optionals['pid'] is not None:
+            self.ensure_capability('pid')
+            params['pid_mode'] = optionals['pid']
+
+        return params
+
+    def get_host_config(self):
+        """
+        Create HostConfig object
+        """
+        params = self.get_start_params()
+        return docker.utils.create_host_config(**params)
+
     def get_port_bindings(self, ports):
         """
         Parse the `ports` string into a port bindings dict for the `start_container` call.
@ -1041,15 +1156,14 @@ class DockerManager(object):
for container_port, config in self.port_bindings.iteritems(): for container_port, config in self.port_bindings.iteritems():
if isinstance(container_port, int): if isinstance(container_port, int):
container_port = "{0}/tcp".format(container_port) container_port = "{0}/tcp".format(container_port)
bind = {}
if len(config) == 1: if len(config) == 1:
bind['HostIp'] = "0.0.0.0" expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
bind['HostPort'] = "" elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for hostip, hostport in config:
expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)})
else: else:
bind['HostIp'] = config[0] expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
bind['HostPort'] = str(config[1])
expected_bound_ports[container_port] = [bind]
actual_bound_ports = container['HostConfig']['PortBindings'] or {} actual_bound_ports = container['HostConfig']['PortBindings'] or {}
@ -1086,8 +1200,8 @@ class DockerManager(object):
# NETWORK MODE # NETWORK MODE
expected_netmode = self.module.params.get('net') or '' expected_netmode = self.module.params.get('net') or 'bridge'
actual_netmode = container['HostConfig']['NetworkMode'] actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
if actual_netmode != expected_netmode: if actual_netmode != expected_netmode:
self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
differing.append(container) differing.append(container)
@ -1110,6 +1224,16 @@ class DockerManager(object):
self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
differing.append(container) differing.append(container)
# LOG_DRIVER
if self.ensure_capability('log_driver', False): if self.ensure_capability('log_driver', False):
expected_log_driver = self.module.params.get('log_driver') or 'json-file'
actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
differing.append(container)
continue
return differing return differing
def get_deployed_containers(self): def get_deployed_containers(self):
@ -1206,44 +1330,7 @@ class DockerManager(object):
except Exception as e: except Exception as e:
self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
def create_containers(self, count=1): def create_host_config(self):
try:
mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'mem_limit': mem_limit,
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
}
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except:
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
params = { params = {
'lxc_conf': self.lxc_conf, 'lxc_conf': self.lxc_conf,
'binds': self.binds, 'binds': self.binds,
@ -1256,7 +1343,8 @@ class DockerManager(object):
optionals = {} optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy', for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid', 'extra_hosts'): 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver',
'cap_add', 'cap_drop'):
optionals[optional_param] = self.module.params.get(optional_param) optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None: if optionals['dns'] is not None:
@ -1273,6 +1361,10 @@ class DockerManager(object):
if params['restart_policy']['Name'] == 'on-failure': if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
# docker_py only accepts 'host' or None
if 'pid' in optionals and not optionals['pid']:
optionals['pid'] = None
if optionals['pid'] is not None: if optionals['pid'] is not None:
self.ensure_capability('pid') self.ensure_capability('pid')
params['pid_mode'] = optionals['pid'] params['pid_mode'] = optionals['pid']
@ -1281,10 +1373,79 @@ class DockerManager(object):
self.ensure_capability('extra_hosts') self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts'] params['extra_hosts'] = optionals['extra_hosts']
if optionals['log_driver'] is not None:
self.ensure_capability('log_driver')
log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
log_config.type = optionals['log_driver']
params['log_config'] = log_config
if optionals['cap_add'] is not None:
self.ensure_capability('cap_add')
params['cap_add'] = optionals['cap_add']
if optionals['cap_drop'] is not None:
self.ensure_capability('cap_drop')
params['cap_drop'] = optionals['cap_drop']
return docker.utils.create_host_config(**params)
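As a side note, a minimal sketch of the LogConfig handling above (assumes docker-py >= 1.2):

from docker.utils import LogConfig

# Start from the default json-file driver, then reassign the type,
# mirroring how the log_driver parameter is applied above.
log_config = LogConfig(type=LogConfig.types.JSON)
log_config.type = 'syslog'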
def create_containers(self, count=1):
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
'cpuset': self.module.params.get('cpu_set'),
'host_config': self.create_host_config(),
'user': self.module.params.get('docker_user'),
}
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except docker.errors.APIError as e:
if e.response.status_code != 404:
raise
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
params = {}
if not self.ensure_capability('host_config', fail=False):
params = self.get_start_params()
for i in containers: for i in containers:
self.client.start(i['Id'], **params) self.client.start(i, **params)
self.increment_counter('started') self.increment_counter('started')
if not self.module.params.get('detach'):
status = self.client.wait(i['Id'])
if status != 0:
output = self.client.logs(i['Id'], stdout=True, stderr=True,
stream=False, timestamps=False)
self.module.fail_json(status=status, msg=output)
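A sketch of the non-detached wait pattern above (assumes docker-py and a local daemon; cid is a placeholder container id):

import docker

client = docker.Client()
cid = 'placeholder-container-id'
status = client.wait(cid)  # blocks until the container exits
if status != 0:
    # Collect combined stdout/stderr for the failure report.
    print(client.logs(cid, stdout=True, stderr=True,
                      stream=False, timestamps=False))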
def stop_containers(self, containers): def stop_containers(self, containers):
for i in containers: for i in containers:
self.client.stop(i['Id']) self.client.stop(i['Id'])
@ -1299,7 +1460,7 @@ class DockerManager(object):
def kill_containers(self, containers): def kill_containers(self, containers):
for i in containers: for i in containers:
self.client.kill(i['Id']) self.client.kill(i['Id'], self.module.params.get('signal'))
self.increment_counter('killed') self.increment_counter('killed')
def restart_containers(self, containers): def restart_containers(self, containers):
@ -1453,6 +1614,7 @@ def main():
tls_ca_cert = dict(required=False, default=None, type='str'), tls_ca_cert = dict(required=False, default=None, type='str'),
tls_hostname = dict(required=False, type='str', default=None), tls_hostname = dict(required=False, type='str', default=None),
docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'),
docker_user = dict(default=None),
username = dict(default=None), username = dict(default=None),
password = dict(), password = dict(),
email = dict(), email = dict(),
@ -1463,6 +1625,7 @@ def main():
dns = dict(), dns = dict(),
detach = dict(default=True, type='bool'), detach = dict(default=True, type='bool'),
state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
signal = dict(default=None),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
restart_policy_retry = dict(default=0, type='int'), restart_policy_retry = dict(default=0, type='int'),
extra_hosts = dict(type='dict'), extra_hosts = dict(type='dict'),
@ -1475,6 +1638,11 @@ def main():
net = dict(default=None), net = dict(default=None),
pid = dict(default=None), pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'), insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
cpu_set = dict(default=None),
cap_add = dict(default=None, type='list'),
cap_drop = dict(default=None, type='list'),
read_only = dict(default=False, type='bool'),
), ),
required_together = ( required_together = (
['tls_client_cert', 'tls_client_key'], ['tls_client_cert', 'tls_client_key'],
@ -1500,10 +1668,14 @@ def main():
if count > 1 and name: if count > 1 and name:
module.fail_json(msg="Count and name must not be used together") module.fail_json(msg="Count and name must not be used together")
# Explicitly pull new container images, if requested. # Explicitly pull new container images, if requested. Do this before
# Do this before noticing running and deployed containers so that the image names will differ # noticing running and deployed containers so that the image names
# if a newer image has been pulled. # will differ if a newer image has been pulled.
if pull == "always": # Missing images should be pulled first to avoid downtime when old
# container is stopped, but the image for the new one is not downloaded yet.
# It also prevents removal of a running container before realizing
# that the requested image cannot be retrieved.
if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
manager.pull_image() manager.pull_image()
containers = ContainerSet(manager) containers = ContainerSet(manager)
@ -1532,7 +1704,7 @@ def main():
summary=manager.counters, summary=manager.counters,
containers=containers.changed, containers=containers.changed,
reload_reasons=manager.get_reload_reason_message(), reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(containers.changed)) ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))
except DockerAPIError as e: except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)

View file

@ -23,7 +23,7 @@
DOCUMENTATION = ''' DOCUMENTATION = '''
--- ---
module: docker_image module: docker_image
author: Pavel Antonov author: "Pavel Antonov (@softzilla)"
version_added: "1.5" version_added: "1.5"
short_description: manage docker images short_description: manage docker images
description: description:
@ -65,6 +65,12 @@ options:
required: false required: false
default: unix://var/run/docker.sock default: unix://var/run/docker.sock
aliases: [] aliases: []
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
specified by docker-py.
default: docker-py default remote API version
version_added: "2.0"
state: state:
description: description:
- Set the state of the image - Set the state of the image
@ -137,6 +143,14 @@ if HAS_DOCKER_CLIENT:
except ImportError: except ImportError:
from docker.client import APIError as DockerAPIError from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
class DockerImageManager: class DockerImageManager:
def __init__(self, module): def __init__(self, module):
@ -147,7 +161,10 @@ class DockerImageManager:
self.tag = self.module.params.get('tag') self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache') self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url')) docker_url = urlparse(module.params.get('docker_url'))
self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) self.client = docker.Client(
base_url=docker_url.geturl(),
version=module.params.get('docker_api_version'),
timeout=module.params.get('timeout'))
self.changed = False self.changed = False
self.log = [] self.log = []
self.error_msg = None self.error_msg = None
@ -220,14 +237,17 @@ class DockerImageManager:
def main(): def main():
module = AnsibleModule( module = AnsibleModule(
argument_spec = dict( argument_spec = dict(
path = dict(required=False, default=None), path = dict(required=False, default=None),
dockerfile = dict(required=False, default="Dockerfile"), dockerfile = dict(required=False, default="Dockerfile"),
name = dict(required=True), name = dict(required=True),
tag = dict(required=False, default="latest"), tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'), nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']), state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'), docker_url = dict(default='unix://var/run/docker.sock'),
timeout = dict(default=600, type='int'), docker_api_version = dict(required=False,
default=DEFAULT_DOCKER_API_VERSION,
type='str'),
timeout = dict(default=600, type='int'),
) )
) )
if not HAS_DOCKER_CLIENT: if not HAS_DOCKER_CLIENT:

View file

@ -84,7 +84,7 @@ requirements:
- "python >= 2.6" - "python >= 2.6"
- "boto >= 2.9" - "boto >= 2.9"
author: benno@ansible.com Note. Most of the code has been taken from the S3 module. author: "Benno Joy (@bennojoy)"
''' '''
@ -284,7 +284,7 @@ def get_download_url(module, gs, bucket, obj, expiry):
def handle_get(module, gs, bucket, obj, overwrite, dest): def handle_get(module, gs, bucket, obj, overwrite, dest):
md5_remote = keysum(module, gs, bucket, obj) md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() md5_local = module.md5(dest)
if md5_local == md5_remote: if md5_local == md5_remote:
module.exit_json(changed=False) module.exit_json(changed=False)
if md5_local != md5_remote and not overwrite: if md5_local != md5_remote and not overwrite:
@ -300,7 +300,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# Lets check key state. Does it exist and if it does, compute the etag md5sum. # Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucket_rc and key_rc: if bucket_rc and key_rc:
md5_remote = keysum(module, gs, bucket, obj) md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() md5_local = module.md5(src)
if md5_local == md5_remote: if md5_local == md5_remote:
module.exit_json(msg="Local and remote object are identical", changed=False) module.exit_json(msg="Local and remote object are identical", changed=False)
if md5_local != md5_remote and not overwrite: if md5_local != md5_remote and not overwrite:

View file

@ -58,6 +58,14 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
service_account_permissions:
version_added: 2.0
description:
- service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information)
required: false
default: null
aliases: []
choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"]
pem_file: pem_file:
version_added: 1.5.1 version_added: 1.5.1
description: description:
@ -142,7 +150,7 @@ requirements:
- "apache-libcloud >= 0.13.3" - "apache-libcloud >= 0.13.3"
notes: notes:
- Either I(name) or I(instance_names) is required. - Either I(name) or I(instance_names) is required.
author: Eric Johnson <erjohnso@google.com> author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -287,6 +295,8 @@ def create_instances(module, gce, instance_names):
ip_forward = module.params.get('ip_forward') ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip') external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete') disk_auto_delete = module.params.get('disk_auto_delete')
service_account_permissions = module.params.get('service_account_permissions')
service_account_email = module.params.get('service_account_email')
if external_ip == "none": if external_ip == "none":
external_ip = None external_ip = None
@ -317,7 +327,7 @@ def create_instances(module, gce, instance_names):
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
if metadata: if metadata:
try: try:
md = literal_eval(metadata) md = literal_eval(str(metadata))
if not isinstance(md, dict): if not isinstance(md, dict):
raise ValueError('metadata must be a dict') raise ValueError('metadata must be a dict')
except ValueError, e: except ValueError, e:
@ -330,6 +340,20 @@ def create_instances(module, gce, instance_names):
items.append({"key": k,"value": v}) items.append({"key": k,"value": v})
metadata = {'items': items} metadata = {'items': items}
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP:
bad_perms.append(perm)
if bad_perms:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
if service_account_email:
ex_sa_perms.append({'email': service_account_email})
else:
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
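For illustration, the structure built above is a single-entry list pairing an email (or "default") with the validated scopes; the values here are hypothetical:

ex_sa_perms = [{
    'email': 'default',
    'scopes': ['compute-rw', 'storage-ro'],
}]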
# These variables all have default values but check just in case # These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone: if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable', module.fail_json(msg='Missing required create instance variable',
@ -349,7 +373,7 @@ def create_instances(module, gce, instance_names):
inst = gce.create_node(name, lc_machine_type, lc_image, inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags, location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms)
changed = True changed = True
except ResourceExistsError: except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone) inst = gce.ex_get_node(name, lc_zone)
@ -437,6 +461,7 @@ def main():
tags = dict(type='list'), tags = dict(type='list'),
zone = dict(default='us-central1-a'), zone = dict(default='us-central1-a'),
service_account_email = dict(), service_account_email = dict(),
service_account_permissions = dict(type='list'),
pem_file = dict(), pem_file = dict(),
project_id = dict(), project_id = dict(),
ip_forward = dict(type='bool', default=False), ip_forward = dict(type='bool', default=False),

View file

@ -134,7 +134,7 @@ options:
requirements: requirements:
- "python >= 2.6" - "python >= 2.6"
- "apache-libcloud >= 0.13.3" - "apache-libcloud >= 0.13.3"
author: Eric Johnson <erjohnso@google.com> author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
''' '''
EXAMPLES = ''' EXAMPLES = '''

View file

@ -75,7 +75,7 @@ options:
aliases: [] aliases: []
state: state:
description: description:
- desired state of the persistent disk - desired state of the network or firewall
required: false required: false
default: "present" default: "present"
choices: ["active", "present", "absent", "deleted"] choices: ["active", "present", "absent", "deleted"]
@ -105,7 +105,7 @@ options:
requirements: requirements:
- "python >= 2.6" - "python >= 2.6"
- "apache-libcloud >= 0.13.3" - "apache-libcloud >= 0.13.3"
author: Eric Johnson <erjohnso@google.com> author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -264,7 +264,7 @@ def main():
if fw: if fw:
gce.ex_destroy_firewall(fw) gce.ex_destroy_firewall(fw)
changed = True changed = True
if name: elif name:
json_output['name'] = name json_output['name'] = name
network = None network = None
try: try:

View file

@ -120,7 +120,7 @@ options:
requirements: requirements:
- "python >= 2.6" - "python >= 2.6"
- "apache-libcloud >= 0.13.3" - "apache-libcloud >= 0.13.3"
author: Eric Johnson <erjohnso@google.com> author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
''' '''
EXAMPLES = ''' EXAMPLES = '''

View file

@ -92,7 +92,7 @@ requirements:
- "python >= 2.6" - "python >= 2.6"
- "linode-python" - "linode-python"
- "pycurl" - "pycurl"
author: Vincent Viallet author: "Vincent Viallet (@zbal)"
notes: notes:
- LINODE_API_KEY env variable can be used instead - LINODE_API_KEY env variable can be used instead
''' '''

cloud/openstack/README.md
View file

@ -0,0 +1,56 @@
OpenStack Ansible Modules
=========================
These are a set of modules for interacting with OpenStack as either an admin
or an end user. If the module does not begin with os_, it's either deprecated
or soon to be. This document serves as developer coding guidelines for
modules intended to be here.
Naming
------
* All modules should start with os_
* If the module is one that a cloud consumer would expect to use, it should be
named after the logical resource it manages. Thus, os\_server not os\_nova.
The reasoning is that some logical resources are managed by more than one
service, and which service manages them is a deployment detail. A good
example of this is floating IPs, which can come from either Nova or
Neutron, but which one they come from is immaterial to an end user.
* If the module is one that a cloud admin would expect to use, it should
be named with the service and the resource, such as os\_keystone\_domain.
* If the module is one that a cloud admin and a cloud consumer could both use,
the cloud consumer rules apply.
Interface
---------
* If the resource being managed has an id, it should be returned.
* If the resource being managed has an associated object more complex than
an id, it should also be returned.
Interoperability
----------------
* It should be assumed that the cloud consumer does not know a bazillion
details about the deployment choices their cloud provider made, and a best
effort should be made to present one sane interface to the ansible user
regardless of deployer insanity.
* All modules should work appropriately against all existing known public
OpenStack clouds.
* It should be assumed that a user may have more than one cloud account that
they wish to combine as part of a single ansible managed infrastructure.
Libraries
---------
* All modules should use openstack\_full\_argument\_spec to pick up the
standard input such as auth and ssl support.
* All modules should extends\_documentation\_fragment: openstack to go along
with openstack\_full\_argument\_spec.
* All complex cloud interaction or interoperability code should be housed in
the [shade](http://git.openstack.org/cgit/openstack-infra/shade) library.
* All OpenStack API interactions should happen via shade and not via
OpenStack Client libraries. The OpenStack Client libraries do not have end
users as a primary audience; they are for intra-server communication. The
python-openstacksdk is the future there, and shade will migrate to it when
it's ready, in a manner that is not noticeable to ansible users.
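A minimal sketch of these conventions (the option names and the get_image call are illustrative only, not a prescribed module):

#!/usr/bin/python
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

def main():
    # openstack_full_argument_spec pulls in the standard auth/ssl options.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(argument_spec, **openstack_module_kwargs())
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    try:
        # All cloud interaction goes through shade, never through the
        # OpenStack client libraries directly.
        cloud = shade.openstack_cloud(**module.params)
        resource = cloud.get_image(module.params['name'])
        module.exit_json(changed=False, id=resource.id if resource else None)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)

from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()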

View file

@ -20,6 +20,7 @@ DOCUMENTATION = '''
--- ---
module: glance_image module: glance_image
version_added: "1.2" version_added: "1.2"
deprecated: Deprecated in 1.10. Use os_image instead
short_description: Add/Delete images from glance short_description: Add/Delete images from glance
description: description:
- Add or Remove images from the glance repository. - Add or Remove images from the glance repository.

View file

@ -29,6 +29,10 @@ DOCUMENTATION = '''
--- ---
module: nova_keypair module: nova_keypair
version_added: "1.2" version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Michael DeHaan"
deprecated: Deprecated in 2.0. Use os_keypair instead
short_description: Add/Delete key pair from nova short_description: Add/Delete key pair from nova
description: description:
- Add or Remove key pair from nova . - Add or Remove key pair from nova .
@ -97,7 +101,7 @@ def main():
state = dict(default='present', choices=['absent', 'present']) state = dict(default='present', choices=['absent', 'present'])
)) ))
module = AnsibleModule(argument_spec=argument_spec) module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_NOVACLIENT: if not HAS_NOVACLIENT:
module.fail_json(msg='python-novaclient is required for this module to work') module.fail_json(msg='python-novaclient is required for this module to work')
nova = nova_client.Client(module.params['login_username'], nova = nova_client.Client(module.params['login_username'],

View file

@ -33,6 +33,10 @@ DOCUMENTATION = '''
--- ---
module: quantum_floating_ip module: quantum_floating_ip
version_added: "1.2" version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Brad P. Crochet (@bcrochet)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Add/Remove floating IP from an instance short_description: Add/Remove floating IP from an instance
description: description:
- Add or Remove a floating IP to an instance - Add or Remove a floating IP to an instance

View file

@ -32,6 +32,8 @@ DOCUMENTATION = '''
--- ---
module: quantum_floating_ip_associate module: quantum_floating_ip_associate
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance short_description: Associate or disassociate a particular floating IP with an instance
description: description:
- Associates or disassociates a specific floating IP with a particular instance - Associates or disassociates a specific floating IP with a particular instance

View file

@ -30,6 +30,7 @@ DOCUMENTATION = '''
--- ---
module: quantum_network module: quantum_network
version_added: "1.4" version_added: "1.4"
deprecated: Deprecated in 2.0. Use os_network instead
short_description: Creates/Removes networks from OpenStack short_description: Creates/Removes networks from OpenStack
description: description:
- Add or Remove network from OpenStack. - Add or Remove network from OpenStack.

View file

@ -75,7 +75,7 @@ options:
requirements: requirements:
- "python >= 2.6" - "python >= 2.6"
- python-keystoneclient - python-keystoneclient
author: Lorin Hochstein author: "Lorin Hochstein (@lorin)"
''' '''
EXAMPLES = ''' EXAMPLES = '''

View file

@ -27,6 +27,7 @@ DOCUMENTATION = '''
module: os_auth module: os_auth
short_description: Retrieve an auth token short_description: Retrieve an auth token
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Retrieve an auth token from an OpenStack Cloud - Retrieve an auth token from an OpenStack Cloud
requirements: requirements:

View file

@ -0,0 +1,74 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds
required: false
default: []
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os-client-config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
# Get the information back just about the mordred cloud
- os-client-config:
clouds:
- mordred
'''
def main():
module = AnsibleModule(argument_spec=dict(
clouds=dict(required=False, default=[]),
))
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = []
for cloud in config.get_all_clouds():
if not p['clouds'] or cloud.name in p['clouds']:
cloud.config['name'] = cloud.name
clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()

View file

@ -0,0 +1,198 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Author: Davide Guerri <davide.guerri@hp.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
- Add or Remove a floating IP to an instance
options:
server:
description:
- The name or ID of the instance to which the IP address
should be assigned.
required: true
network:
description:
- The name or ID of a neutron external network or a nova pool name.
required: false
floating_ip_address:
description:
- A floating IP address to attach or to detach. Required only if state
is absent. When state is present, it can be used to specify an IP
address to attach.
required: false
reuse:
description:
- When state is present, and floating_ip_address is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
required: false
default: false
fixed_address:
description:
- The fixed IP of the server that the floating IP address should be
attached to.
required: false
wait:
description:
- When attaching a floating IP address, specify whether we should
wait for it to appear as attached.
required: false
default: false
timeout:
description:
- Time to wait for an IP address to appear as attached. See wait.
required: false
default: 60
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
required: false
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Assign a floating IP to the first interface of `cattle001` from an existing
# external network or nova pool. A new floating IP from the first available
# external network is allocated to the project.
- os_floating_ip:
cloud: dguerri
server: cattle001
# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- os_floating_ip:
cloud: dguerri
state: present
reuse: yes
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
state: absent
floating_ip_address: 203.0.113.2
server: cattle001
'''
def _get_floating_ip(cloud, floating_ip_address):
f_ips = cloud.search_floating_ips(
filters={'floating_ip_address': floating_ip_address})
if not f_ips:
return None
return f_ips[0]
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False),
floating_ip_address=dict(required=False),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
server_name_or_id = module.params['server']
state = module.params['state']
network = module.params['network']
floating_ip_address = module.params['floating_ip_address']
reuse = module.params['reuse']
fixed_address = module.params['fixed_address']
wait = module.params['wait']
timeout = module.params['timeout']
cloud = shade.openstack_cloud(**module.params)
try:
server = cloud.get_server(server_name_or_id)
if server is None:
module.fail_json(
msg="server {0} not found".format(server_name_or_id))
if state == 'present':
if floating_ip_address is None:
if reuse:
f_ip = cloud.available_floating_ip(network=network)
else:
f_ip = cloud.create_floating_ip(network=network)
else:
f_ip = _get_floating_ip(cloud, floating_ip_address)
if f_ip is None:
module.fail_json(
msg="floating IP {0} not found".format(
floating_ip_address))
cloud.attach_ip_to_server(
server_id=server['id'], floating_ip_id=f_ip['id'],
fixed_address=fixed_address, wait=wait, timeout=timeout)
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
module.exit_json(changed=True, floating_ip=f_ip)
elif state == 'absent':
if floating_ip_address is None:
module.fail_json(msg="floating_ip_address is required")
f_ip = _get_floating_ip(cloud, floating_ip_address)
if f_ip is None:
    module.fail_json(
        msg="floating IP {0} not found".format(floating_ip_address))
cloud.detach_ip_from_server(
    server_id=server['id'], floating_ip_id=f_ip['id'])
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
module.exit_json(changed=True, floating_ip=f_ip)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

cloud/openstack/os_image.py
View file

@ -0,0 +1,188 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'yes'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
owner = dict(default=None),
min_disk = dict(default=None),
min_ram = dict(default=None),
is_public = dict(default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,353 @@
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2014, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
import jsonpatch
DOCUMENTATION = '''
---
module: os_ironic
short_description: Create/Delete Bare Metal Resources from OpenStack
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Create or Remove Ironic nodes from OpenStack.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
uuid:
description:
- globally unique identifier (UUID) to be given to the resource. Will
be auto-generated if not specified and name is specified.
- Definition of a UUID will always take precedence over a name value.
required: false
default: None
name:
description:
- unique name identifier to be given to the resource.
required: false
default: None
driver:
description:
- The name of the Ironic Driver to use with this node.
required: true
default: None
chassis_uuid:
description:
- Associate the node with a pre-defined chassis.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
driver_info:
description:
- Information for this server's driver. Will vary based on which
driver is in use. Any sub-field which is populated will be validated
during creation.
suboptions:
power:
description:
- Information necessary to turn this server on / off.
This often includes such things as IPMI username, password, and IP address.
required: true
deploy:
description:
- Information necessary to deploy this server directly, without using Nova. THIS IS NOT RECOMMENDED.
console:
description:
- Information necessary to connect to this server's serial console. Not all drivers support this.
management:
description:
- Information necessary to interact with this server's management interface. May be shared by power_info in some cases.
required: true
nics:
description:
- 'A list of network interface cards, e.g., " - mac: aa:bb:cc:aa:bb:cc"'
required: true
properties:
description:
- Definition of the physical characteristics of this server, used for scheduling purposes
suboptions:
cpu_arch:
description:
- CPU architecture (x86_64, i686, ...)
default: x86_64
cpus:
description:
- Number of CPU cores this machine has
default: 1
ram:
description:
- amount of RAM this machine has, in MB
default: 1
disk_size:
description:
- size of first storage device in this machine (typically /dev/sda), in GB
default: 1
skip_update_of_masked_password:
description:
- Allows the code that would assert changes to nodes to skip the
update if the change is a single line consisting of the password
field. As of Kilo, by default, passwords are always masked to API
requests, which means the logic as a result always attempts to
re-assert the password field.
required: false
default: false
requirements: ["shade", "jsonpatch"]
'''
EXAMPLES = '''
# Enroll a node with some basic properties and driver info
- os_ironic:
cloud: "devstack"
driver: "pxe_ipmitool"
uuid: "00000000-0000-0000-0000-000000000002"
properties:
cpus: 2
cpu_arch: "x86_64"
ram: 8192
disk_size: 64
nics:
- mac: "aa:bb:cc:aa:bb:cc"
- mac: "dd:ee:ff:dd:ee:ff"
driver_info:
power:
ipmi_address: "1.2.3.4"
ipmi_username: "admin"
ipmi_password: "adminpass"
chassis_uuid: "00000000-0000-0000-0000-000000000001"
'''
def _parse_properties(module):
p = module.params['properties']
props = dict(
cpu_arch=p.get('cpu_arch') if p.get('cpu_arch') else 'x86_64',
cpus=p.get('cpus') if p.get('cpus') else 1,
memory_mb=p.get('ram') if p.get('ram') else 1,
local_gb=p.get('disk_size') if p.get('disk_size') else 1,
)
return props
def _parse_driver_info(module):
p = module.params['driver_info']
info = p.get('power')
if not info:
raise shade.OpenStackCloudException(
"driver_info['power'] is required")
if p.get('console'):
info.update(p.get('console'))
if p.get('management'):
info.update(p.get('management'))
if p.get('deploy'):
info.update(p.get('deploy'))
return info
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
def _is_value_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true']
if value in true_values:
return True
return False
def _choose_if_password_only(module, patch):
if len(patch) == 1:
if 'password' in patch[0]['path'] and _is_value_true(
module.params['skip_update_of_masked_password']):
# Return False to abort the update, as the password appears
# to be the only element in the patch.
return False
return True
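A standalone sketch of the single-element password patch this guards against (requires the jsonpatch library; the values are illustrative):

import jsonpatch

server_config = {'driver_info': {'ipmi_password': '******'}}
requested = {'driver_info': {'ipmi_password': 'adminpass'}}
patch = list(jsonpatch.JsonPatch.from_diff(server_config, requested))
# patch == [{'op': 'replace', 'path': '/driver_info/ipmi_password',
#            'value': 'adminpass'}] -- a one-op patch touching only the
# password field, which _choose_if_password_only() can elect to skip.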
def _exit_node_not_updated(module, server):
module.exit_json(
changed=False,
result="Node not updated",
uuid=server['uuid'],
provision_state=server['provision_state']
)
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
driver=dict(required=False),
driver_info=dict(type='dict', required=True),
nics=dict(type='list', required=True),
properties=dict(type='dict', default={}),
ironic_url=dict(required=False),
chassis_uuid=dict(required=False),
skip_update_of_masked_password=dict(required=False, choices=BOOLEANS),
state=dict(required=False, default='present')
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears to be disabled, "
"Please define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
node_id = _choose_id_value(module)
try:
cloud = shade.operator_cloud(**module.params)
server = cloud.get_machine(node_id)
if module.params['state'] == 'present':
if module.params['driver'] is None:
module.fail_json(msg="A driver must be defined in order "
"to set a node to present.")
properties = _parse_properties(module)
driver_info = _parse_driver_info(module)
kwargs = dict(
driver=module.params['driver'],
properties=properties,
driver_info=driver_info,
name=module.params['name'],
)
if module.params['chassis_uuid']:
kwargs['chassis_uuid'] = module.params['chassis_uuid']
if server is None:
# Note(TheJulia): Add a specific UUID to the request if
# present, so that kwargs can be re-used by the
# node-already-exists logic below, since uuid cannot be
# updated.
if module.params['uuid']:
kwargs['uuid'] = module.params['uuid']
server = cloud.register_machine(module.params['nics'],
**kwargs)
module.exit_json(changed=True, uuid=server['uuid'],
provision_state=server['provision_state'])
else:
# TODO(TheJulia): Presently this does not support updating
# nics. Support needs to be added.
#
# Note(TheJulia): This message should never get logged
# however we cannot realistically proceed if neither a
# name or uuid was supplied to begin with.
if not node_id:
module.fail_json(msg="A uuid or name value "
"must be defined")
# Note(TheJulia): Constructing the configuration to compare
# against. The items listed in the server_config block can
# be updated via the API.
server_config = dict(
driver=server['driver'],
properties=server['properties'],
driver_info=server['driver_info'],
name=server['name'],
)
# Add the pre-existing chassis_uuid only if
# it is present in the server configuration.
if hasattr(server, 'chassis_uuid'):
server_config['chassis_uuid'] = server['chassis_uuid']
# Note(TheJulia): If a password is defined and concealed, a
# patch will always be generated and re-asserted.
patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs)
if not patch:
_exit_node_not_updated(module, server)
elif _choose_if_password_only(module, list(patch)):
# Note(TheJulia): Normally we would allow the general
# exception catch below, however this allows a specific
# message.
try:
server = cloud.patch_machine(
server['uuid'],
list(patch))
except Exception as e:
module.fail_json(msg="Failed to update node, "
"Error: %s" % e.message)
# Enumerate out a list of changed paths.
change_list = []
for change in list(patch):
change_list.append(change['path'])
module.exit_json(changed=True,
result="Node Updated",
changes=change_list,
uuid=server['uuid'],
provision_state=server['provision_state'])
# Return not updated by default as the conditions were not met
# to update.
_exit_node_not_updated(module, server)
if module.params['state'] == 'absent':
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"in order to remove a node.")
if server is not None:
cloud.unregister_machine(module.params['nics'],
server['uuid'])
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="Server not found")
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,333 @@
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
choices: ['true', 'false']
default: true
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
required: false
default: None
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
required: false
default: false
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
required: false
default: None
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
if _is_true(module.params['maintenance']):
if _is_false(node['maintenance']):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node has been set into "
"maintenance mode")
else:
# User has requested maintenance state, node is already in the
# desired state, checking to see if the reason has changed.
if (str(node['maintenance_reason']) !=
str(module.params['maintenance_reason'])):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node maintenance reason "
"updated, cannot take any "
"additional action.")
elif _is_false(module.params['maintenance']):
if node['maintenance'] is True:
cloud.remove_machine_from_maintenance(node['uuid'])
return True
else:
module.fail_json(msg="maintenance parameter was set but a valid "
"the value was not recognized.")
return False
def _check_set_power_state(module, cloud, node):
if 'power on' in str(node['power_state']):
if _is_false(module.params['power']):
# User has requested the node be powered off.
cloud.set_machine_power_off(node['uuid'])
module.exit_json(changed=True, msg="Power requested off")
if 'power off' in str(node['power_state']):
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
return False
if (_is_false(module.params['power']) and
_is_true(module.params['state'])):
module.exit_json(
changed=False,
msg="Power for node is off; node must be reactivated "
"OR set to state absent"
)
# In the event the power has been toggled on and
# deployment has been requested, we need to skip this
# step.
if (_is_true(module.params['power']) and
_is_false(module.params['deploy'])):
# Node is powered down when it is not awaiting provisioning
cloud.set_machine_power_on(node['uuid'])
return True
# Default False if no action has been taken.
return False
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
instance_info=dict(type='dict', required=False),
config_drive=dict(required=False),
ironic_url=dict(required=False),
state=dict(required=False, default='present'),
maintenance=dict(required=False),
maintenance_reason=dict(required=False),
power=dict(required=False, default='present'),
deploy=dict(required=False, default=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears disabled, Please "
"define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
node_id = _choose_id_value(module)
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"to use this module.")
try:
cloud = shade.operator_cloud(**module.params)
node = cloud.get_machine(node_id)
if node is None:
module.fail_json(msg="node not found")
uuid = node['uuid']
instance_info = module.params['instance_info']
changed = False
# User has requested the desired state to be maintenance.
if module.params['state'] == 'maintenance':
module.params['maintenance'] = True
if node['provision_state'] in [
'cleaning',
'deleting',
'wait call-back']:
module.fail_json(msg="Node is in %s state, cannot act upon the "
"request as the node is in a transition "
"state" % node['provision_state'])
# TODO(TheJulia) This is in-development code, that requires
# code in the shade library that is still in development.
if _check_set_maintenance(module, cloud, node):
if node['provision_state'] == 'active':
module.exit_json(changed=True,
result="Maintenance state changed")
changed = True
node = cloud.get_machine(node_id)
if _check_set_power_state(module, cloud, node):
changed = True
node = cloud.get_machine(node_id)
if _is_true(module.params['state']):
if _is_false(module.params['deploy']):
module.exit_json(
changed=changed,
result="User request has explicitly disabled "
"deployment logic"
)
if 'active' in node['provision_state']:
module.exit_json(
changed=changed,
result="Node already in an active state."
)
if instance_info is None:
module.fail_json(
changed=changed,
msg="When setting an instance to present, "
"instance_info is a required variable.")
# TODO(TheJulia): Update instance info, however info is
# deployment specific. Perhaps consider adding rebuild
# support, although there is a known desire to remove
# rebuild support from Ironic at some point in the future.
patch = _prepare_instance_info_patch(instance_info)
cloud.set_node_instance_info(uuid, patch)
cloud.validate_node(uuid)
cloud.activate_node(uuid, module.params['config_drive'])
# TODO(TheJulia): Add more error checking and a wait option.
# We will need to loop, or just add the logic to shade,
# although this could be a very long running process as
# baremetal deployments are not a "quick" task.
module.exit_json(changed=changed, result="node activated")
elif _is_false(module.params['state']):
if node['provision_state'] not in "deleted":
cloud.purge_node_instance_info(uuid)
cloud.deactivate_node(uuid)
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="node not found")
else:
module.fail_json(msg="State must be present, absent, "
"maintenance, off")
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,167 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove key pair from OpenStack
options:
name:
description:
- Name that has to be given to the key pair
required: true
public_key:
description:
- The public key that would be uploaded to nova and injected into VMs
upon creation.
required: false
default: None
public_key_file:
description:
- Path to local file containing ssh public key. Mutually exclusive
with public_key.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: []
'''
EXAMPLES = '''
# Creates a key pair with the running user's public key
- os_keypair:
cloud: mordred
state: present
name: ansible_key
public_key_file: /home/me/.ssh/id_rsa.pub
# Creates a new key pair; the private key is returned after the run.
- os_keypair:
cloud: rax-dfw
state: present
name: ansible_key
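# Deletes the key pair (a hypothetical cleanup of the key created above)
- os_keypair:
cloud: mordred
state: absent
name: ansible_key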
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the keypair.
returned: success
type: string
public_key:
description: The public key value for the keypair.
returned: success
type: string
private_key:
description: The private key value for the keypair.
returned: Only when a keypair is generated for the user (e.g., when creating one
and a public key is not specified).
type: string
'''
def _system_state_change(module, keypair):
state = module.params['state']
if state == 'present' and not keypair:
return True
if state == 'absent' and keypair:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
public_key = dict(default=None),
public_key_file = dict(default=None),
state = dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[['public_key', 'public_key_file']])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
public_key = module.params['public_key']
if module.params['public_key_file']:
# Read the key from disk, ensuring the file handle is closed.
with open(module.params['public_key_file']) as public_key_fh:
public_key = public_key_fh.read().rstrip()
try:
cloud = shade.openstack_cloud(**module.params)
keypair = cloud.get_keypair(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, keypair))
if state == 'present':
if keypair and keypair['name'] == name:
if public_key and (public_key != keypair['public_key']):
module.fail_json(
msg="Key name %s present but key hash not the same"
" as offered. Delete key first." % name
)
else:
module.exit_json(changed=False, key=keypair)
new_key = cloud.create_keypair(name, public_key)
module.exit_json(changed=True, key=new_key)
elif state == 'absent':
if keypair:
cloud.delete_keypair(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

View file

@ -0,0 +1,107 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/Removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
- os_network:
name=t1network
state=present
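# Removes the same network (a hypothetical teardown example)
- os_network:
name=t1network
state=absent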
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
shared = module.params['shared']
admin_state_up = module.params['admin_state_up']
try:
cloud = shade.openstack_cloud(**module.params)
net = cloud.get_network(name)
if state == 'present':
if not net:
net = cloud.create_network(name, shared, admin_state_up)
module.exit_json(changed=True, network=net, id=net['id'])
module.exit_json(changed=False, network=net, id=net['id'])
elif state == 'absent':
if not net:
module.exit_json(changed=False)
else:
cloud.delete_network(name)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,237 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Add or remove flavors from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(ram), I(vcpus), and I(disk) are all required. There are no
default values for those parameters.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Flavor name.
required: true
ram:
description:
- Amount of memory, in MB.
required: false
default: null
vcpus:
description:
- Number of virtual CPUs.
required: false
default: null
disk:
description:
- Size of local disk, in GB.
required: false
default: null
ephemeral:
description:
- Ephemeral space size, in GB.
required: false
default: 0
swap:
description:
- Swap space size, in MB.
required: false
default: 0
rxtx_factor:
description:
- RX/TX factor.
required: false
default: 1.0
is_public:
description:
- Make flavor accessible to the public.
required: false
default: true
flavorid:
description:
- ID for the flavor. This is optional as a unique UUID will be
assigned if a value is not specified.
required: false
default: "auto"
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, 10GB of
# local disk, and 10GB of ephemeral space.
- os_nova_flavor:
cloud=mycloud
state=present
name=tiny
ram=1024
vcpus=1
disk=10
ephemeral=10
# Delete 'tiny' flavor
- os_nova_flavor:
cloud=mycloud
state=absent
name=tiny
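# Create a non-public flavor with an explicit ID (hypothetical values)
- os_nova_flavor:
cloud=mycloud
state=present
name=tiny.private
ram=512
vcpus=1
disk=5
is_public=false
flavorid=tiny-private-001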
'''
RETURN = '''
flavor:
description: Dictionary describing the flavor.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
'''
def _system_state_change(module, flavor):
state = module.params['state']
if state == 'present' and not flavor:
return True
if state == 'absent' and flavor:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
state = dict(required=False, default='present',
choices=['absent', 'present']),
name = dict(required=False),
# required when state is 'present'
ram = dict(required=False, type='int'),
vcpus = dict(required=False, type='int'),
disk = dict(required=False, type='int'),
ephemeral = dict(required=False, default=0, type='int'),
swap = dict(required=False, default=0, type='int'),
rxtx_factor = dict(required=False, default=1.0, type='float'),
is_public = dict(required=False, default=True, type='bool'),
flavorid = dict(required=False, default="auto"),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['ram', 'vcpus', 'disk'])
],
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
try:
cloud = shade.operator_cloud(**module.params)
flavor = cloud.get_flavor(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, flavor))
if state == 'present':
if not flavor:
flavor = cloud.create_flavor(
name=name,
ram=module.params['ram'],
vcpus=module.params['vcpus'],
disk=module.params['disk'],
flavorid=module.params['flavorid'],
ephemeral=module.params['ephemeral'],
swap=module.params['swap'],
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
module.exit_json(changed=True, flavor=flavor)
module.exit_json(changed=False, flavor=flavor)
elif state == 'absent':
if flavor:
cloud.delete_flavor(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

View file

@ -0,0 +1,125 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be given to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Creates an object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab
# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
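# Deletes only the 'fstab' object, leaving the container in place
# (a hypothetical companion to the examples above)
- os_object: cloud=mordred state=absent name=fstab container=config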
'''
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
changed = True
else:
cloud_obj.delete_container(container)
changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = process_object(cloud, **module.params)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,142 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_security_group
short_description: Add/Delete security groups from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Add or Remove security groups from an OpenStack cloud.
options:
name:
description:
- Name that has to be given to the security group. This module
requires that security group names be unique.
required: true
description:
description:
- Long description of the purpose of the security group
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a security group
- os_security_group:
cloud=mordred
state=present
name=foo
description=security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
cloud=mordred
state=present
name=foo
description=updated description for the foo security group
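# Delete the 'foo' security group (a hypothetical teardown example)
- os_security_group:
cloud=mordred
state=absent
name=foo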
'''
def _needs_update(module, secgroup):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
if secgroup['description'] != module.params['description']:
return True
return False
def _system_state_change(module, secgroup):
state = module.params['state']
if state == 'present':
if not secgroup:
return True
return _needs_update(module, secgroup)
if state == 'absent' and secgroup:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
description = module.params['description']
try:
cloud = shade.openstack_cloud(**module.params)
secgroup = cloud.get_security_group(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup))
changed = False
if state == 'present':
if not secgroup:
secgroup = cloud.create_security_group(name, description)
changed = True
else:
if _needs_update(module, secgroup):
secgroup = cloud.update_security_group(
secgroup['id'], description=description)
changed = True
module.exit_json(
changed=changed, id=secgroup['id'], secgroup=secgroup)
if state == 'absent':
if secgroup:
cloud.delete_security_group(secgroup['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

View file

@ -0,0 +1,325 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove rule from an existing security group
options:
security_group:
description:
- Name of the security group
required: true
protocol:
description:
- IP protocol
choices: ['tcp', 'udp', 'icmp', None]
default: None
port_range_min:
description:
- Starting port
required: false
default: None
port_range_max:
description:
- Ending port
required: false
default: None
remote_ip_prefix:
description:
- Source IP address(es) in CIDR notation (exclusive with remote_group)
required: false
remote_group:
description:
- ID of Security group to link (exclusive with remote_ip_prefix)
required: false
ethertype:
description:
- Must be IPv4 or IPv6, and addresses represented in CIDR must
match the ingress or egress rules. Not all providers support IPv6.
choices: ['IPv4', 'IPv6']
default: IPv4
direction:
description:
- The direction in which the security group rule is applied. Not
all providers support egress.
choices: ['egress', 'ingress']
default: ingress
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 80
port_range_max: 80
remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: icmp
port_range_min: -1
port_range_max: -1
remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
port_range_min: 1
port_range_max: 65535
remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
cloud: mordred
security_group: foo
protocol: tcp
remote_ip_prefix: 0.0.0.0/0
'''
RETURN = '''
id:
description: Unique rule UUID.
type: string
direction:
description: The direction in which the security group rule is applied.
type: string
sample: 'egress'
ethertype:
description: One of IPv4 or IPv6.
type: string
sample: 'IPv4'
port_range_min:
description: The minimum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
port_range_max:
description: The maximum port number in the range that is matched by
the security group rule.
type: int
sample: 8000
protocol:
description: The protocol that is matched by the security group rule.
type: string
sample: 'tcp'
remote_ip_prefix:
description: The remote IP prefix to be associated with this security group rule.
type: string
sample: '0.0.0.0/0'
security_group_id:
description: The security group ID to associate with this security group rule.
type: string
'''
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
"""
Capture the complex port matching logic.
The port values coming in for the module might be -1 (for ICMP),
which will work only for Nova, but this is handled by shade. Likewise,
they might be None, which works for Neutron, but not Nova. This too is
handled by shade. Since shade will consistently return these port
values as None, we need to convert any -1 values input to the module
to None here for comparison.
For TCP and UDP protocols, None values for both min and max are
represented as the range 1-65535 for Nova, but remain None for
Neutron. Shade returns the full range when Nova is the backend (since
that is how Nova stores them), and None values for Neutron. If None
values are input to the module for both values, then we need to adjust
for comparison.
"""
# Check if the user is supplying -1 for ICMP.
if protocol == 'icmp':
if module_min and int(module_min) == -1:
module_min = None
if module_max and int(module_max) == -1:
module_max = None
# Check if user is supplying None values for full TCP/UDP port range.
if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
if (rule_min and int(rule_min) == 1
and rule_max and int(rule_max) == 65535):
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
module_min = int(module_min)
if module_max:
module_max = int(module_max)
if rule_min:
rule_min = int(rule_min)
if rule_max:
rule_max = int(rule_max)
return module_min == rule_min and module_max == rule_max
def _find_matching_rule(module, secgroup):
"""
Find a rule in the group that matches the module parameters.
:returns: The matching rule dict, or None if no matches.
"""
protocol = module.params['protocol']
remote_ip_prefix = module.params['remote_ip_prefix']
ethertype = module.params['ethertype']
direction = module.params['direction']
for rule in secgroup['security_group_rules']:
if (protocol == rule['protocol']
and remote_ip_prefix == rule['remote_ip_prefix']
and ethertype == rule['ethertype']
and direction == rule['direction']
and _ports_match(protocol,
module.params['port_range_min'],
module.params['port_range_max'],
rule['port_range_min'],
rule['port_range_max'])):
return rule
return None
def _system_state_change(module, secgroup):
state = module.params['state']
if secgroup:
rule_exists = _find_matching_rule(module, secgroup)
else:
return False
if state == 'present' and not rule_exists:
return True
if state == 'absent' and rule_exists:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
security_group = dict(required=True),
# NOTE(Shrews): None is an acceptable protocol value for
# Neutron, but Nova will balk at this.
protocol = dict(default=None,
choices=[None, 'tcp', 'udp', 'icmp']),
port_range_min = dict(required=False, type='int'),
port_range_max = dict(required=False, type='int'),
remote_ip_prefix = dict(required=False, default=None),
# TODO(mordred): Make remote_group handle name and id
remote_group = dict(required=False, default=None),
ethertype = dict(default='IPv4',
choices=['IPv4', 'IPv6']),
direction = dict(default='ingress',
choices=['egress', 'ingress']),
state = dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['remote_ip_prefix', 'remote_group'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
security_group = module.params['security_group']
changed = False
try:
cloud = shade.openstack_cloud(**module.params)
secgroup = cloud.get_security_group(security_group)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup))
if state == 'present':
if not secgroup:
module.fail_json(msg='Could not find security group %s' %
security_group)
rule = _find_matching_rule(module, secgroup)
if not rule:
rule = cloud.create_security_group_rule(
secgroup['id'],
port_range_min=module.params['port_range_min'],
port_range_max=module.params['port_range_max'],
protocol=module.params['protocol'],
remote_ip_prefix=module.params['remote_ip_prefix'],
remote_group_id=module.params['remote_group'],
direction=module.params['direction'],
ethertype=module.params['ethertype']
)
changed = True
module.exit_json(changed=changed, rule=rule, id=rule['id'])
if state == 'absent' and secgroup:
rule = _find_matching_rule(module, secgroup)
if rule:
cloud.delete_security_group_rule(rule['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

View file

@ -33,6 +33,7 @@ module: os_server
short_description: Create/Delete Compute Instances from OpenStack short_description: Create/Delete Compute Instances from OpenStack
extends_documentation_fragment: openstack extends_documentation_fragment: openstack
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Create or Remove compute instances from OpenStack. - Create or Remove compute instances from OpenStack.
options: options:
@ -89,6 +90,11 @@ options:
- Ensure instance has public ip however the cloud wants to do that - Ensure instance has public ip however the cloud wants to do that
required: false required: false
default: 'yes' default: 'yes'
auto_floating_ip:
description:
- If the module should automatically assign a floating IP
required: false
default: 'yes'
floating_ips: floating_ips:
description: description:
- list of valid floating IPs that pre-exist to assign to this node - list of valid floating IPs that pre-exist to assign to this node
@ -240,7 +246,8 @@ EXAMPLES = '''
def _exit_hostvars(module, cloud, server, changed=True): def _exit_hostvars(module, cloud, server, changed=True):
hostvars = meta.get_hostvars_from_server(cloud, server) hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(changed=changed, id=server.id, openstack=hostvars) module.exit_json(
changed=changed, server=server, id=server.id, openstack=hostvars)
def _network_args(module, cloud): def _network_args(module, cloud):

View file

@ -0,0 +1,193 @@
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_actions
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@j2sol)"
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
options:
server:
description:
- Name or ID of the instance
required: true
wait:
description:
- If the module should wait for the instance action to be performed.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to perform
the requested action.
required: false
default: 180
action:
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
choices: [pause, unpause, lock, unlock, suspend, resume]
required: true
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_actions:
action: pause
auth:
auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
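# Unpauses the same instance, referencing a named cloud from clouds.yaml
# (a hypothetical follow-up to the example above)
- os_server_actions:
action: unpause
cloud: mordred
server: vm1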
'''
_action_map = {'pause': 'PAUSED',
'unpause': 'ACTIVE',
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
'resume': 'ACTIVE'}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _wait(timeout, cloud, server, action, module):
"""Wait for the server to reach the desired state for the given action."""
for count in shade._iterate_timeout(
timeout,
"Timeout waiting for server to complete %s" % action):
try:
server = cloud.get_server(server.id)
except Exception:
continue
if server.status == _action_map[action]:
return
if server.status == 'ERROR':
module.fail_json(msg="Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
"""Check if system state would change."""
if status == _action_map[action]:
return False
return True
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['pause', 'unpause', 'lock', 'unlock', 'suspend',
'resume']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
try:
if action in _admin_actions:
cloud = shade.operator_cloud(**module.params)
else:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
if not server:
module.fail_json(msg='Could not find server %s' % module.params['server'])
status = server.status
if module.check_mode:
module.exit_json(changed=_system_state_change(action, status))
if action == 'pause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.pause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'unpause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.unpause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'lock':
# lock doesn't set a state, just do it
cloud.nova_client.servers.lock(server=server.id)
module.exit_json(changed=True)
elif action == 'unlock':
# unlock doesn't set a state, just do it
cloud.nova_client.servers.unlock(server=server.id)
module.exit_json(changed=True)
elif action == 'suspend':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.suspend(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'resume':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.resume(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

View file

@ -27,6 +27,7 @@ DOCUMENTATION = '''
module: os_server_facts module: os_server_facts
short_description: Retrieve facts about a compute instance short_description: Retrieve facts about a compute instance
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Retrieve facts about a server instance from OpenStack. - Retrieve facts about a server instance from OpenStack.
notes: notes:

View file

@ -31,6 +31,7 @@ module: os_server_volume
short_description: Attach/Detach Volumes from OpenStack VM's short_description: Attach/Detach Volumes from OpenStack VM's
extends_documentation_fragment: openstack extends_documentation_fragment: openstack
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Attach or Detach volumes from OpenStack VM's - Attach or Detach volumes from OpenStack VM's
options: options:

View file

@ -29,6 +29,7 @@ module: os_subnet
short_description: Add/Remove subnet to an OpenStack network short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack extends_documentation_fragment: openstack
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Add or Remove a subnet to an OpenStack network - Add or Remove a subnet to an OpenStack network
options: options:
@ -91,6 +92,18 @@ options:
- A list of host route dictionaries for the subnet. - A list of host route dictionaries for the subnet.
required: false required: false
default: None default: None
ipv6_ra_mode:
description:
- IPv6 router advertisement mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
ipv6_address_mode:
description:
- IPv6 address mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
requirements: requirements:
- "python >= 2.6" - "python >= 2.6"
- "shade" - "shade"
@ -116,6 +129,19 @@ EXAMPLES = '''
- os_subnet: - os_subnet:
state=absent state=absent
name=net1subnet name=net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
state: present
name: intv6
network_name: internal
ip_version: 6
cidr: 2db8:1::/64
dns_nameservers:
- 2001:4860:4860::8888
- 2001:4860:4860::8844
ipv6_ra_mode: dhcpv6-stateless
ipv6_address_mode: dhcpv6-stateless
''' '''
@ -162,6 +188,7 @@ def _system_state_change(module, subnet):
def main(): def main():
ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
argument_spec = openstack_full_argument_spec( argument_spec = openstack_full_argument_spec(
name=dict(required=True), name=dict(required=True),
network_name=dict(default=None), network_name=dict(default=None),
@ -173,6 +200,9 @@ def main():
allocation_pool_start=dict(default=None), allocation_pool_start=dict(default=None),
allocation_pool_end=dict(default=None), allocation_pool_end=dict(default=None),
host_routes=dict(default=None, type='list'), host_routes=dict(default=None, type='list'),
ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
state=dict(default='present', choices=['absent', 'present']),
) )
module_kwargs = openstack_module_kwargs() module_kwargs = openstack_module_kwargs()
@ -194,6 +224,8 @@ def main():
pool_start = module.params['allocation_pool_start'] pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end'] pool_end = module.params['allocation_pool_end']
host_routes = module.params['host_routes'] host_routes = module.params['host_routes']
ipv6_ra_mode = module.params['ipv6_ra_mode']
ipv6_a_mode = module.params['ipv6_address_mode']
# Check for required parameters when state == 'present' # Check for required parameters when state == 'present'
if state == 'present': if state == 'present':
@ -224,8 +256,10 @@ def main():
gateway_ip=gateway_ip, gateway_ip=gateway_ip,
dns_nameservers=dns, dns_nameservers=dns,
allocation_pools=pool, allocation_pools=pool,
host_routes=host_routes) host_routes=host_routes,
module.exit_json(changed=True, result="created") ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_a_mode)
changed = True
else: else:
if _needs_update(subnet, module): if _needs_update(subnet, module):
cloud.update_subnet(subnet['id'], cloud.update_subnet(subnet['id'],
@ -234,17 +268,21 @@ def main():
gateway_ip=gateway_ip, gateway_ip=gateway_ip,
dns_nameservers=dns, dns_nameservers=dns,
allocation_pools=pool, allocation_pools=pool,
host_routes=host_routes) host_routes=host_routes,
module.exit_json(changed=True, result="updated") ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_a_mode)
changed = True
else: else:
module.exit_json(changed=False, result="success") changed = False
module.exit_json(changed=changed)
elif state == 'absent': elif state == 'absent':
if not subnet: if not subnet:
module.exit_json(changed=False, result="success") changed = False
else: else:
changed = True
cloud.delete_subnet(subnet_name) cloud.delete_subnet(subnet_name)
module.exit_json(changed=True, result="deleted") module.exit_json(changed=changed)
except shade.OpenStackCloudException as e: except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message) module.fail_json(msg=e.message)

View file

@ -29,6 +29,7 @@ module: os_volume
short_description: Create/Delete Cinder Volumes short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack extends_documentation_fragment: openstack
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)"
description: description:
- Create or Remove cinder block storage volumes - Create or Remove cinder block storage volumes
options: options:
@ -89,7 +90,7 @@ EXAMPLES = '''
def _present_volume(module, cloud): def _present_volume(module, cloud):
if cloud.volume_exists(module.params['display_name']): if cloud.volume_exists(module.params['display_name']):
v = cloud.get_volume(module.params['display_name']) v = cloud.get_volume(module.params['display_name'])
module.exit_json(changed=False, id=v['id']) module.exit_json(changed=False, id=v['id'], volume=v)
volume_args = dict( volume_args = dict(
size=module.params['size'], size=module.params['size'],
@ -106,7 +107,7 @@ def _present_volume(module, cloud):
volume = cloud.create_volume( volume = cloud.create_volume(
wait=module.params['wait'], timeout=module.params['timeout'], wait=module.params['wait'], timeout=module.params['timeout'],
**volume_args) **volume_args)
module.exit_json(changed=True, id=volume['id']) module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud): def _absent_volume(module, cloud):
@ -116,8 +117,8 @@ def _absent_volume(module, cloud):
wait=module.params['wait'], wait=module.params['wait'],
timeout=module.params['timeout']) timeout=module.params['timeout'])
except shade.OpenStackCloudTimeout: except shade.OpenStackCloudTimeout:
module.exit_json(changed=False, result="Volume deletion timed-out") module.exit_json(changed=False)
module.exit_json(changed=True, result='Volume Deleted') module.exit_json(changed=True)
def main(): def main():

View file

@ -30,6 +30,7 @@ DOCUMENTATION = '''
--- ---
module: quantum_router module: quantum_router
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Create or Remove router from openstack short_description: Create or Remove router from openstack
description: description:
- Create or Delete routers from OpenStack - Create or Delete routers from OpenStack

View file

@ -30,6 +30,7 @@ DOCUMENTATION = '''
--- ---
module: quantum_router_gateway module: quantum_router_gateway
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: set/unset a gateway interface for the router with the specified external network short_description: set/unset a gateway interface for the router with the specified external network
description: description:
- Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic.

View file

@ -30,6 +30,7 @@ DOCUMENTATION = '''
--- ---
module: quantum_router_interface module: quantum_router_interface
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Attach/Dettach a subnet's interface to a router short_description: Attach/Dettach a subnet's interface to a router
description: description:
- Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.

View file

@ -182,7 +182,9 @@ options:
description: description:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Jesse Keating, Matt Martz author:
- "Jesse Keating (@j2sol)"
- "Matt Martz (@sivel)"
notes: notes:
- I(exact_count) can be "destructive" if the number of running servers in - I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the the I(group) is larger than that specified in I(count). In such a case, the

View file

@ -79,7 +79,9 @@ options:
description: description:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Christopher H. Laco, Matt Martz author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -58,7 +58,9 @@ options:
description: description:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Christopher H. Laco, Matt Martz author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -52,7 +52,7 @@ options:
description: description:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Simon JAILLET author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -44,7 +44,7 @@ options:
- Indicate desired state of the resource - Indicate desired state of the resource
choices: ['present', 'absent'] choices: ['present', 'absent']
default: present default: present
author: Simon JAILLET author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -51,7 +51,7 @@ options:
- Indicate desired state of the resource - Indicate desired state of the resource
choices: ['present', 'absent'] choices: ['present', 'absent']
default: present default: present
author: Simon JAILLET author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -103,7 +103,9 @@ options:
description: description:
- how long before wait gives up, in seconds - how long before wait gives up, in seconds
default: 300 default: 300
author: Christopher H. Laco, Matt Martz author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -85,7 +85,7 @@ options:
required: false required: false
description: description:
- Weight of node - Weight of node
author: Lukasz Kawczynski author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -48,7 +48,7 @@ notes:
- "It is recommended that plays utilizing this module be run with - "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API" the Rackspace CloudDNS API"
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -84,7 +84,7 @@ notes:
supplied supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7 - C(PTR) record support was added in version 1.7
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -35,7 +35,7 @@ options:
description: description:
- Server name to retrieve facts for - Server name to retrieve facts for
default: null default: null
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -76,7 +76,7 @@ options:
web_index: web_index:
description: description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: Paul Durivage author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -92,7 +92,7 @@ options:
- file - file
- meta - meta
default: file default: file
author: Paul Durivage author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -29,7 +29,9 @@ options:
- Indicate desired state of the resource - Indicate desired state of the resource
choices: ['present', 'absent'] choices: ['present', 'absent']
default: present default: present
author: Christopher H. Laco, Matt Martz author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -39,7 +39,7 @@ options:
- present - present
- absent - absent
default: present default: present
author: Matt Martz author: "Matt Martz (@sivel)"
notes: notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a - Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate. keypair you must first delete and then recreate.

View file

@ -39,7 +39,7 @@ options:
description: description:
- A hash of metadata to associate with the instance - A hash of metadata to associate with the instance
default: null default: null
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -39,7 +39,9 @@ options:
description: description:
- cidr of the network being created - cidr of the network being created
default: null default: null
author: Christopher H. Laco, Jesse Keating author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack extends_documentation_fragment: rackspace.openstack
''' '''

View file

@ -35,7 +35,9 @@ options:
- present - present
- absent - absent
default: present default: present
author: Christopher H. Laco, Matt Martz author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -105,7 +105,7 @@ options:
- Data to be uploaded to the servers config drive. This option implies - Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string I(config_drive). Can be a file path or a string
version_added: 1.8 version_added: 1.8
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''
@ -263,7 +263,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
lc = sg.get_launch_config() lc = sg.get_launch_config()
lc_args = {} lc_args = {}
if server_name != lc.get('name'): if server_name != lc.get('name'):
lc_args['name'] = server_name lc_args['server_name'] = server_name
if image != lc.get('image'): if image != lc.get('image'):
lc_args['image'] = image lc_args['image'] = image
@ -273,7 +273,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
disk_config = disk_config or 'AUTO' disk_config = disk_config or 'AUTO'
if ((disk_config or lc.get('disk_config')) and if ((disk_config or lc.get('disk_config')) and
disk_config != lc.get('disk_config')): disk_config != lc.get('disk_config', 'AUTO')):
lc_args['disk_config'] = disk_config lc_args['disk_config'] = disk_config
if (meta or lc.get('meta')) and meta != lc.get('metadata'): if (meta or lc.get('meta')) and meta != lc.get('metadata'):
@ -299,7 +299,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if key_name != lc.get('key_name'): if key_name != lc.get('key_name'):
lc_args['key_name'] = key_name lc_args['key_name'] = key_name
if config_drive != lc.get('config_drive'): if config_drive != lc.get('config_drive', False):
lc_args['config_drive'] = config_drive lc_args['config_drive'] = config_drive
if (user_data and if (user_data and

View file

@ -73,7 +73,7 @@ options:
- present - present
- absent - absent
default: present default: present
author: Matt Martz author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''

View file

@ -65,13 +65,13 @@ options:
default: null default: null
state: state:
description: description:
- Indicate desired state of the vm. - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest.
default: present default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template: from_template:
version_added: "1.9" version_added: "1.9"
description: description:
- Specifies if the VM should be deployed from a template (cannot be ran with state) - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
default: no default: no
choices: ['yes', 'no'] choices: ['yes', 'no']
template_src: template_src:
@ -79,6 +79,12 @@ options:
description: description:
- Name of the source template to deploy from - Name of the source template to deploy from
default: None default: None
snapshot_to_clone:
description:
- A string that, when specified, will create a linked clone copy of the VM. The snapshot must already exist in vCenter.
version_added: "2.0"
required: false
default: none
vm_disk: vm_disk:
description: description:
- A key, value list of disks and their sizes and which datastore to keep it in. - A key, value list of disks and their sizes and which datastore to keep it in.
@ -119,7 +125,7 @@ options:
notes:
  - This module should run from a system that can access vSphere directly.
    Either by using local_action, or using delegate_to.
-author: Richard Hoop <wrhoop@gmail.com>
+author: "Richard Hoop (@rhoop) <wrhoop@gmail.com>"
requirements:
  - "python >= 2.6"
  - pysphere
@ -153,11 +159,18 @@ EXAMPLES = '''
        type: vmxnet3
        network: VM Network
        network_type: standard
+     nic2:
+       type: vmxnet3
+       network: dvSwitch Network
+       network_type: dvs
    vm_hardware:
      memory_mb: 2048
      num_cpus: 2
      osid: centos64Guest
      scsi: paravirtual
+   vm_cdrom:
+     type: "iso"
+     iso_path: "DatastoreName/cd-image.iso"
    esxi:
      datacenter: MyDatacenter
      hostname: esx001.mydomain.local
@ -195,7 +208,6 @@ EXAMPLES = '''
      hostname: esx001.mydomain.local

# Deploy a guest from a template
-# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc..
- vsphere_guest:
    vcenter_hostname: vcenter.mydomain.local
    username: myuser
@ -410,13 +422,21 @@ def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name=
def find_datastore(module, s, datastore, config_target):
    # Verify the datastore exists and put it in brackets if it does.
    ds = None
-   for d in config_target.Datastore:
-       if (d.Datastore.Accessible and
-           (datastore and d.Datastore.Name == datastore)
-           or (not datastore)):
-           ds = d.Datastore.Datastore
-           datastore = d.Datastore.Name
-           break
+   if config_target:
+       for d in config_target.Datastore:
+           if (d.Datastore.Accessible and
+               (datastore and d.Datastore.Name == datastore)
+               or (not datastore)):
+               ds = d.Datastore.Datastore
+               datastore = d.Datastore.Name
+               break
+   else:
+       for ds_mor, ds_name in s.get_datastores().items():
+           ds_props = VIProperty(s, ds_mor)
+           if (ds_props.summary.accessible and (datastore and ds_name == datastore)
+               or (not datastore)):
+               ds = ds_mor
+               datastore = ds_name
    if not ds:
        s.disconnect()
        module.fail_json(msg="Datastore: %s does not appear to exist" %
@ -515,26 +535,78 @@ def vmdisk_id(vm, current_datastore_name):
    return id_list

-def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name):
+def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone):
    vmTemplate = vsphere_client.get_vm_by_name(template_src)
    vmTarget = None

-   try:
-       cluster = [k for k,
-                  v in vsphere_client.get_clusters().items() if v == cluster_name][0]
-   except IndexError, e:
-       vsphere_client.disconnect()
-       module.fail_json(msg="Cannot find Cluster named: %s" %
-                        cluster_name)
-
-   try:
-       rpmor = [k for k, v in vsphere_client.get_resource_pools(
-           from_mor=cluster).items()
-           if v == resource_pool][0]
-   except IndexError, e:
-       vsphere_client.disconnect()
-       module.fail_json(msg="Cannot find Resource Pool named: %s" %
-                        resource_pool)
+   if esxi:
+       datacenter = esxi['datacenter']
+       esxi_hostname = esxi['hostname']
+
+       # Datacenter managed object reference
+       dclist = [k for k,
+                 v in vsphere_client.get_datacenters().items() if v == datacenter]
+       if dclist:
+           dcmor=dclist[0]
+       else:
+           vsphere_client.disconnect()
+           module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
+
+       dcprops = VIProperty(vsphere_client, dcmor)
+
+       # hostFolder managed reference
+       hfmor = dcprops.hostFolder._obj
+
+       # Grab the computerResource name and host properties
+       crmors = vsphere_client._retrieve_properties_traversal(
+           property_names=['name', 'host'],
+           from_node=hfmor,
+           obj_type='ComputeResource')
+
+       # Grab the host managed object reference of the esxi_hostname
+       try:
+           hostmor = [k for k,
+                      v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
+       except IndexError, e:
+           vsphere_client.disconnect()
+           module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
+
+       # Grab the computeResource managed object reference of the host we are
+       # creating the VM on.
+       crmor = None
+       for cr in crmors:
+           if crmor:
+               break
+           for p in cr.PropSet:
+               if p.Name == "host":
+                   for h in p.Val.get_element_ManagedObjectReference():
+                       if h == hostmor:
+                           crmor = cr.Obj
+                           break
+                   if crmor:
+                       break
+       crprops = VIProperty(vsphere_client, crmor)
+       rpmor = crprops.resourcePool._obj
+   elif resource_pool:
+       try:
+           cluster = [k for k,
+                      v in vsphere_client.get_clusters().items() if v == cluster_name][0]
+       except IndexError, e:
+           vsphere_client.disconnect()
+           module.fail_json(msg="Cannot find Cluster named: %s" %
+                            cluster_name)
+
+       try:
+           rpmor = [k for k, v in vsphere_client.get_resource_pools(
+               from_mor=cluster).items()
+               if v == resource_pool][0]
+       except IndexError, e:
+           vsphere_client.disconnect()
+           module.fail_json(msg="Cannot find Resource Pool named: %s" %
+                            resource_pool)
+   else:
+       module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]")

    try:
        vmTarget = vsphere_client.get_vm_by_name(guest)
@ -547,9 +619,14 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
    try:
        if vmTarget:
            changed = False
+       elif snapshot_to_clone is not None:
+           # check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
+           vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone)
+           changed = True
        else:
            vmTemplate.clone(guest, resourcepool=rpmor)
            changed = True

        vsphere_client.disconnect()
        module.exit_json(changed=changed)
    except Exception as e:
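
The linked-clone support is a thin dispatch around the clone call shown in the hunk: pysphere's clone() takes the new guest name plus `resourcepool`, `linked`, and `snapshot` keyword arguments, and the module only forwards `snapshot_to_clone` when it is set. The branch reduced to its core (error handling and disconnect elided; a sketch, not the full module flow):

    def clone_from_template(template_vm, guest_name, rpmor, snapshot_to_clone=None):
        """Full clone by default; linked clone when a snapshot name is given.
        The snapshot must already exist on the template in vCenter."""
        if snapshot_to_clone is not None:
            # Linked clone: shares the template's disks up to the snapshot point.
            template_vm.clone(guest_name, resourcepool=rpmor,
                              linked=True, snapshot=snapshot_to_clone)
        else:
            template_vm.clone(guest_name, resourcepool=rpmor)
        return True  # changed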
@ -564,13 +641,14 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    shutdown = False
+   poweron = vm.is_powered_on()

    memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    # Change Memory
-   if vm_hardware['memory_mb']:
+   if 'memory_mb' in vm_hardware:

        if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
            spec = spec_singleton(spec, request, vm)
@ -600,7 +678,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
            changes['memory'] = vm_hardware['memory_mb']

    # ====( Config Memory )====#
-   if vm_hardware['num_cpus']:
+   if 'num_cpus' in vm_hardware:
        if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
            spec = spec_singleton(spec, request, vm)
@ -654,7 +732,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
            module.fail_json(
                msg="Error reconfiguring vm: %s" % task.get_error_message())

-       if vm.is_powered_off():
+       if vm.is_powered_off() and poweron:
            try:
                vm.power_on(sync_run=True)
            except Exception, e:
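
The new `poweron` flag fixes a subtle idempotence bug: a reconfigure may require powering the guest off, and the old code then powered it back on unconditionally, even for guests that were off to begin with. Recording the starting state up front means only guests the module stopped get restarted. The pattern in isolation, with a toy VM object rather than pysphere:

    class ToyVM(object):
        def __init__(self, powered=True):
            self.powered = powered
        def is_powered_on(self):
            return self.powered
        def is_powered_off(self):
            return not self.powered
        def power_on(self):
            self.powered = True
        def power_off(self):
            self.powered = False

    def reconfigure(vm, needs_shutdown):
        poweron = vm.is_powered_on()         # remember the starting state
        if needs_shutdown and vm.is_powered_on():
            vm.power_off()
        # ... apply the configuration change here ...
        if vm.is_powered_off() and poweron:  # only restore what we changed
            vm.power_on()

    vm = ToyVM(powered=False)
    reconfigure(vm, needs_shutdown=True)
    print(vm.is_powered_off())  # True: a guest that was off stays off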
@ -1150,9 +1228,10 @@ def main():
                'reconfigured'
            ],
            default='present'),
-       vmware_guest_facts=dict(required=False, choices=BOOLEANS),
-       from_template=dict(required=False, choices=BOOLEANS),
+       vmware_guest_facts=dict(required=False, type='bool'),
+       from_template=dict(required=False, type='bool'),
        template_src=dict(required=False, type='str'),
+       snapshot_to_clone=dict(required=False, default=None, type='str'),
        guest=dict(required=True, type='str'),
        vm_disk=dict(required=False, type='dict', default={}),
        vm_nic=dict(required=False, type='dict', default={}),
@ -1161,7 +1240,7 @@ def main():
        vm_hw_version=dict(required=False, default=None, type='str'),
        resource_pool=dict(required=False, default=None, type='str'),
        cluster=dict(required=False, default=None, type='str'),
-       force=dict(required=False, choices=BOOLEANS, default=False),
+       force=dict(required=False, type='bool', default=False),
        esxi=dict(required=False, type='dict', default={}),
@ -1177,8 +1256,7 @@ def main():
            'vm_hardware',
            'esxi'
        ],
-       ['resource_pool', 'cluster'],
-       ['from_template', 'resource_pool', 'template_src']
+       ['from_template', 'template_src'],
    ],
)
@ -1202,6 +1280,8 @@ def main():
    cluster = module.params['cluster']
    template_src = module.params['template_src']
    from_template = module.params['from_template']
+   snapshot_to_clone = module.params['snapshot_to_clone']

    # CONNECT TO THE SERVER
    viserver = VIServer()
@ -1281,7 +1361,8 @@ def main():
                guest=guest,
                template_src=template_src,
                module=module,
-               cluster_name=cluster
+               cluster_name=cluster,
+               snapshot_to_clone=snapshot_to_clone
            )
        if state in ['restarted', 'reconfigured']:
            module.fail_json(
@ -1321,6 +1402,6 @@ def main():

# this is magic, see lib/ansible/module_common.py
-#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()

View file

@ -21,6 +21,7 @@
import copy
import sys
import datetime
+import glob
import traceback
import re
import shlex
@ -47,12 +48,12 @@ options:
    aliases: []
  creates:
    description:
-     - a filename, when it already exists, this step will B(not) be run.
+     - a filename or glob pattern, when it already exists, this step will B(not) be run.
    required: no
    default: null
  removes:
    description:
-     - a filename, when it does not exist, this step will B(not) be run.
+     - a filename or glob pattern, when it does not exist, this step will B(not) be run.
    version_added: "0.8"
    required: no
    default: null
@ -81,7 +82,9 @@ notes:
    M(command) module is much more secure as it's not affected by the user's
    environment.
  - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
-author: Michael DeHaan
+author:
+    - Ansible Core Team
+    - Michael DeHaan
'''

EXAMPLES = '''
@ -154,12 +157,22 @@ def main():

    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
-   module = CommandModule(argument_spec=dict())
+   module = AnsibleModule(
+       argument_spec=dict(
+           _raw_params = dict(),
+           _uses_shell = dict(type='bool', default=False),
+           chdir = dict(),
+           executable = dict(),
+           creates = dict(),
+           removes = dict(),
+           warn = dict(type='bool', default=True),
+       )
+   )

-   shell = module.params['shell']
+   shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
-   args = module.params['args']
+   args = module.params['_raw_params']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']
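
With the argument spec above, the free-form command text arrives in the `_raw_params` key and the key=value options become ordinary module parameters, so the custom parser removed further down is no longer needed. An illustrative (assumed, not taken from the module's runtime) view of how a task line maps into module.params:

    # Task line: ls -l /tmp chdir=/var creates=/tmp/done
    params = {
        '_raw_params': 'ls -l /tmp',   # the free-form command text
        '_uses_shell': False,          # True when invoked as the shell module
        'chdir': '/var',
        'creates': '/tmp/done',
        'executable': None,
        'removes': None,
        'warn': True,
    }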
@ -168,6 +181,7 @@ def main():
        module.fail_json(rc=256, msg="no command given")

    if chdir:
+       chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
@ -175,7 +189,7 @@ def main():
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
-       if os.path.exists(v):
+       if glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
@ -189,7 +203,7 @@ def main():
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
-       if not os.path.exists(v):
+       if not glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
@ -232,48 +246,4 @@ def main():

from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *

-# only the command module should ever need to do this
-# everything else should be simple key=value
-
-class CommandModule(AnsibleModule):
-
-    def _handle_aliases(self):
-        return {}
-
-    def _check_invalid_arguments(self):
-        pass
-
-    def _load_params(self):
-        ''' read the input and return a dictionary and the arguments string '''
-        args = MODULE_ARGS
-        params = copy.copy(OPTIONS)
-        params['shell'] = False
-        if "#USE_SHELL" in args:
-            args = args.replace("#USE_SHELL", "")
-            params['shell'] = True
-
-        items = split_args(args)
-
-        for x in items:
-            quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
-            if '=' in x and not quoted:
-                # check to see if this is a special parameter for the command
-                k, v = x.split('=', 1)
-                v = unquote(v.strip())
-                if k in OPTIONS.keys():
-                    if k == "chdir":
-                        v = os.path.abspath(os.path.expanduser(v))
-                        if not (os.path.exists(v) and os.path.isdir(v)):
-                            self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v)
-                    elif k == "executable":
-                        v = os.path.abspath(os.path.expanduser(v))
-                        if not (os.path.exists(v)):
-                            self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v)
-                    params[k] = v
-        # Remove any of the above k=v params from the args string
-        args = PARAM_REGEX.sub('', args)
-        params['args'] = args.strip()
-        return (params, params['args'])
-
main()

View file

@ -34,7 +34,9 @@ notes:
    playbooks will follow the trend of using M(command) unless M(shell) is
    explicitly required. When running ad-hoc commands, use your best
    judgement.
-author: Michael DeHaan
+author:
+    - Ansible Core Team
+    - Michael DeHaan
'''

EXAMPLES = '''

View file

@ -32,7 +32,9 @@ options:
    version_added: "1.5"
notes:
  - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
-author: Michael DeHaan
+author:
+    - Ansible Core Team
+    - Michael DeHaan
"""

EXAMPLES = '''

View file

@ -57,7 +57,9 @@ notes:
    "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
-author: Michael DeHaan
+author:
+    - Ansible Core Team
+    - Michael DeHaan
'''

EXAMPLES = '''

View file

@ -89,7 +89,7 @@ notes:
    the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
    default login of C(root) with no password.
requirements: [ ConfigParser ]
-author: Mark Theunissen
+author: "Mark Theunissen (@marktheunissen)"
'''

EXAMPLES = '''
@ -111,6 +111,7 @@ import ConfigParser
import os
import pipes
import stat
+import subprocess
try:
    import MySQLdb
except ImportError:
@ -142,14 +143,20 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port,
        cmd += " --all-databases"
    else:
        cmd += " %s" % pipes.quote(db_name)
+
+   path = None
    if os.path.splitext(target)[-1] == '.gz':
-       cmd = cmd + ' | gzip > ' + pipes.quote(target)
+       path = module.get_bin_path('gzip', True)
    elif os.path.splitext(target)[-1] == '.bz2':
-       cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
+       path = module.get_bin_path('bzip2', True)
    elif os.path.splitext(target)[-1] == '.xz':
-       cmd = cmd + ' | xz > ' + pipes.quote(target)
+       path = module.get_bin_path('xz', True)
+
+   if path:
+       cmd = '%s | %s > %s' % (cmd, path, pipes.quote(target))
    else:
        cmd += " > %s" % pipes.quote(target)
+
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
    return rc, stdout, stderr
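
The dump path now resolves the compressor with module.get_bin_path(..., True), failing early when the binary is missing instead of assuming gzip/bzip2/xz are on $PATH, and then builds a single shell pipeline. Outside an Ansible module the same shape looks roughly like this (the mysqldump command line and paths are illustrative):

    import os
    import pipes  # shlex.quote on Python 3

    COMPRESSORS = {'.gz': 'gzip', '.bz2': 'bzip2', '.xz': 'xz'}

    def dump_command(base_cmd, target):
        """Append either a compressing pipeline or a plain redirect,
        chosen from the target file's extension."""
        prog = COMPRESSORS.get(os.path.splitext(target)[-1])
        if prog:
            return '%s | %s > %s' % (base_cmd, prog, pipes.quote(target))
        return '%s > %s' % (base_cmd, pipes.quote(target))

    print(dump_command('mysqldump mydb', '/tmp/mydb.sql.gz'))
    # mysqldump mydb | gzip > /tmp/mydb.sql.gz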
@ -157,69 +164,44 @@ def db_import(module, host, user, password, db_name, target, all_databases, port
    if not os.path.exists(target):
        return module.fail_json(msg="target %s does not exist on the host" % target)

-   cmd = module.get_bin_path('mysql', True)
-   cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
+   cmd = [module.get_bin_path('mysql', True)]
+   if user:
+       cmd.append("--user=%s" % pipes.quote(user))
+   if password:
+       cmd.append("--password=%s" % pipes.quote(password))
    if socket is not None:
-       cmd += " --socket=%s" % pipes.quote(socket)
+       cmd.append("--socket=%s" % pipes.quote(socket))
    else:
-       cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
+       cmd.append("--host=%s" % pipes.quote(host))
+       cmd.append("--port=%i" % port)
    if not all_databases:
-       cmd += " -D %s" % pipes.quote(db_name)
+       cmd.append("-D")
+       cmd.append(pipes.quote(db_name))
+
+   comp_prog_path = None
    if os.path.splitext(target)[-1] == '.gz':
-       gzip_path = module.get_bin_path('gzip')
-       if not gzip_path:
-           module.fail_json(msg="gzip command not found")
-       #gzip -d file (uncompress)
-       rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target))
-       if rc != 0:
-           return rc, stdout, stderr
-       #Import sql
-       cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
-       try:
-           rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
-           if rc != 0:
-               return rc, stdout, stderr
-       finally:
-           #gzip file back up
-           module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
+       comp_prog_path = module.get_bin_path('gzip', required=True)
    elif os.path.splitext(target)[-1] == '.bz2':
-       bzip2_path = module.get_bin_path('bzip2')
-       if not bzip2_path:
-           module.fail_json(msg="bzip2 command not found")
-       #bzip2 -d file (uncompress)
-       rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target))
-       if rc != 0:
-           return rc, stdout, stderr
-       #Import sql
-       cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
-       try:
-           rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
-           if rc != 0:
-               return rc, stdout, stderr
-       finally:
-           #bzip2 file back up
-           rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
+       comp_prog_path = module.get_bin_path('bzip2', required=True)
    elif os.path.splitext(target)[-1] == '.xz':
-       xz_path = module.get_bin_path('xz')
-       if not xz_path:
-           module.fail_json(msg="xz command not found")
-       #xz -d file (uncompress)
-       rc, stdout, stderr = module.run_command('%s -d %s' % (xz_path, target))
-       if rc != 0:
-           return rc, stdout, stderr
-       #Import sql
-       cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
-       try:
-           rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
-           if rc != 0:
-               return rc, stdout, stderr
-       finally:
-           #xz file back up
-           rc, stdout, stderr = module.run_command('%s %s' % (xz_path, os.path.splitext(target)[0]))
+       comp_prog_path = module.get_bin_path('xz', required=True)
+
+   if comp_prog_path:
+       p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+       p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+       (stdout2, stderr2) = p2.communicate()
+       p1.stdout.close()
+       p1.wait()
+       if p1.returncode != 0:
+           stderr1 = p1.stderr.read()
+           return p1.returncode, '', stderr1
+       else:
+           return p2.returncode, stdout2, stderr2
    else:
+       cmd = ' '.join(cmd)
        cmd += " < %s" % pipes.quote(target)
        rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
        return rc, stdout, stderr

def db_create(cursor, db, encoding, collation):
    query_params = dict(enc=encoding, collate=collation)
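
The rewritten import path is the larger win: instead of decompressing the dump on disk, importing, and re-compressing it afterwards, it streams gzip/bzip2/xz -dc output straight into the mysql client through a pipe. The two-process pattern reduced to its core (command names here are illustrative):

    import subprocess

    def stream_import(comp_prog, archive, mysql_cmd):
        """Decompress 'archive' with '<comp_prog> -dc' and feed it to
        mysql_cmd via a pipe, never writing an uncompressed copy to disk."""
        p1 = subprocess.Popen([comp_prog, '-dc', archive],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p2 = subprocess.Popen(mysql_cmd, stdin=p1.stdout,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout2, stderr2 = p2.communicate()
        p1.stdout.close()   # let p1 receive SIGPIPE if p2 exits early
        p1.wait()
        if p1.returncode != 0:
            return p1.returncode, '', p1.stderr.read()
        return p2.returncode, stdout2, stderr2

    # e.g. stream_import('gzip', '/tmp/mydb.sql.gz', ['mysql', '-D', 'mydb'])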
@ -344,7 +326,7 @@ def main():
    if state in ['dump','import']:
        if target is None:
            module.fail_json(msg="with state=%s target is required" % (state))

        if db == 'all':
            connect_to_db = 'mysql'
            db = 'mysql'
            all_databases = True
@ -370,11 +352,11 @@ def main():
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
        cursor = db_connection.cursor()
    except Exception, e:
+       errno, errstr = e.args
        if "Unknown database" in str(e):
-           errno, errstr = e.args
            module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
        else:
-           module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running")
+           module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr))

    changed = False
    if db_exists(cursor, db):

View file

@ -90,7 +90,8 @@ options:
    description:
      - Check if mysql allows login as root/nopassword before trying supplied credentials.
    required: false
-   default: false
+   choices: [ "yes", "no" ]
+   default: "no"
    version_added: "1.3"
  update_password:
    required: false
@ -108,7 +109,7 @@ options:
notes:
  - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
    is as easy as apt-get install python-mysqldb.
-  - Both C(login_password) and C(login_username) are required when you are
+  - Both C(login_password) and C(login_user) are required when you are
    passing credentials. If none are present, the module will attempt to read
    the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
    default login of 'root' with no password.
@ -119,7 +120,7 @@ notes:
    the file."
requirements: [ "MySQLdb" ]
-author: Mark Theunissen
+author: "Mark Theunissen (@marktheunissen)"
'''

EXAMPLES = """
@ -148,8 +149,6 @@ mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock

# Example .my.cnf file for setting the root password
-# Note: don't use quotes around the password, because the mysql_user module
-# will include them in the password but the mysql client will not

[client]
user=root
import getpass import getpass
import tempfile import tempfile
import re
try: try:
import MySQLdb import MySQLdb
except ImportError: except ImportError:
@ -244,7 +244,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
            grant_option = True
        if db_table not in new_priv:
            if user != "root" and "PROXY" not in priv and not append_privs:
-               privileges_revoke(cursor, user,host,db_table,grant_option)
+               privileges_revoke(cursor, user,host,db_table,priv,grant_option)
                changed = True

    # If the user doesn't currently have any privileges on a db.table, then
@ -261,7 +261,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
        priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
        if (len(priv_diff) > 0):
            if not append_privs:
-               privileges_revoke(cursor, user,host,db_table,grant_option)
+               privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
            privileges_grant(cursor, user,host,db_table,new_priv[db_table])
            changed = True
@ -292,7 +292,7 @@ def privileges_get(cursor, user,host):
        return x

    for grant in grants:
-       res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
+       res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0])
        if res is None:
            raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0])
        privileges = res.group(1).split(", ")
@ -317,17 +317,19 @@ def privileges_unpack(priv):
    not specified in the string, as MySQL will always provide this by default.
    """
    output = {}
+   privs = []
    for item in priv.strip().split('/'):
        pieces = item.strip().split(':')
-       if '.' in pieces[0]:
-           pieces[0] = pieces[0].split('.')
-           for idx, piece in enumerate(pieces):
-               if pieces[0][idx] != "*":
-                   pieces[0][idx] = "`" + pieces[0][idx] + "`"
-           pieces[0] = '.'.join(pieces[0])
+       dbpriv = pieces[0].rsplit(".", 1)
+       pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])

-       output[pieces[0]] = pieces[1].upper().split(',')
-       new_privs = frozenset(output[pieces[0]])
+       if '(' in pieces[1]:
+           output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
+           for i in output[pieces[0]]:
+               privs.append(re.sub(r'\(.*\)','',i))
+       else:
+           output[pieces[0]] = pieces[1].upper().split(',')
+           privs = output[pieces[0]]
+       new_privs = frozenset(privs)
        if not new_privs.issubset(VALID_PRIVS):
            raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
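
The parser change handles column-level grants such as SELECT (col1, col2): a plain split(',') would cut inside the parentheses, so the new code splits only on commas that sit outside a parenthesised group, then strips the column lists before validating against VALID_PRIVS. A worked example of the two regexes on the module's own priv string format:

    import re

    priv = 'mydb.*:SELECT (col1, col2),INSERT'
    db_table, privs_str = priv.split(':')

    # Split on commas that are not inside a (...) column list.
    parts = re.split(r',\s*(?=[^)]*(?:\(|$))', privs_str.upper())
    print(parts)   # ['SELECT (COL1, COL2)', 'INSERT']

    # Validation then sees bare privilege names, columns removed.
    bare = [re.sub(r'\(.*\)', '', p).strip() for p in parts]
    print(bare)    # ['SELECT', 'INSERT']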
@ -341,7 +343,7 @@ def privileges_unpack(priv):
    return output

-def privileges_revoke(cursor, user,host,db_table,grant_option):
+def privileges_revoke(cursor, user,host,db_table,priv,grant_option):
    # Escape '%' since mysql db.execute() uses a format string
    db_table = db_table.replace('%', '%%')
    if grant_option:
@ -349,7 +351,8 @@ def privileges_revoke(cursor, user,host,db_table,grant_option):
        query.append("FROM %s@%s")
        query = ' '.join(query)
        cursor.execute(query, (user, host))
-   query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')]
+   priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
+   query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
    query.append("FROM %s@%s")
    query = ' '.join(query)
    cursor.execute(query, (user, host))
@ -358,7 +361,7 @@ def privileges_grant(cursor, user,host,db_table,priv):
    # Escape '%' since mysql db.execute uses a format string and the
    # specification of db and table often use a % (SQL wildcard)
    db_table = db_table.replace('%', '%%')
-   priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
+   priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
    query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
    query.append("TO %s@%s")
    if 'GRANT' in priv:
@ -381,12 +384,12 @@ def main():
        login_port=dict(default=3306, type='int'),
        login_unix_socket=dict(default=None),
        user=dict(required=True, aliases=['name']),
-       password=dict(default=None),
+       password=dict(default=None, no_log=True),
        host=dict(default="localhost"),
        state=dict(default="present", choices=["absent", "present"]),
        priv=dict(default=None),
-       append_privs=dict(type="bool", default="no"),
-       check_implicit_admin=dict(default=False),
+       append_privs=dict(default=False, type='bool'),
+       check_implicit_admin=dict(default=False, type='bool'),
        update_password=dict(default="always", choices=["always", "on_create"]),
        config_file=dict(default="~/.my.cnf"),
    )
@ -395,7 +398,7 @@ def main():
    login_password = module.params["login_password"]
    user = module.params["user"]
    password = module.params["password"]
-   host = module.params["host"]
+   host = module.params["host"].lower()
    state = module.params["state"]
    priv = module.params["priv"]
    check_implicit_admin = module.params['check_implicit_admin']

View file

@ -30,6 +30,7 @@ short_description: Manage MySQL global variables
description:
    - Query / Set MySQL variables
version_added: 1.3
+author: "Balazs Pocze (@banyek)"
options:
    variable:
        description:
@ -51,6 +52,11 @@ options:
        description:
            - mysql host to connect
        required: False
+   login_port:
+       version_added: "2.0"
+       description:
+           - mysql port to connect
+       required: False
    login_unix_socket:
        description:
            - unix socket to connect mysql server
@ -67,6 +73,7 @@ EXAMPLES = '''
import ConfigParser
import os
import warnings
+from re import match

try:
    import MySQLdb
@ -103,10 +110,12 @@ def typedvalue(value):

def getvariable(cursor, mysqlvar):
-   cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,))
+   cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
    mysqlvar_val = cursor.fetchall()
-   return mysqlvar_val
+   if len(mysqlvar_val) is 1:
+       return mysqlvar_val[0][1]
+   else:
+       return None

def setvariable(cursor, mysqlvar, value):
    """ Set a global mysql variable to a given value
@ -116,11 +125,9 @@ def setvariable(cursor, mysqlvar, value):
    should be passed as numeric literals.
    """
-   query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ]
-   query.append(" = %s")
-   query = ' '.join(query)
+   query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
    try:
-       cursor.execute(query, (value,))
+       cursor.execute(query + "%s", (value,))
        cursor.fetchall()
        result = True
    except Exception, e:
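
For SET GLOBAL the variable name is an identifier, and identifiers cannot be passed as driver parameters; the module therefore quotes the name itself with mysql_quote_identifier() (after the ^[0-9a-z_]+$ check added in main()) and parameterises only the value. In outline:

    def set_global_variable(cursor, quoted_name, value):
        """quoted_name must already be a vetted, quoted identifier --
        only the value goes through the driver's %s substitution."""
        query = "SET GLOBAL %s = " % quoted_name
        cursor.execute(query + "%s", (value,))
        cursor.fetchall()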
@ -192,7 +199,8 @@ def main():
    argument_spec = dict(
        login_user=dict(default=None),
        login_password=dict(default=None),
-       login_host=dict(default="localhost"),
+       login_host=dict(default="127.0.0.1"),
+       login_port=dict(default="3306", type='int'),
        login_unix_socket=dict(default=None),
        variable=dict(default=None),
        value=dict(default=None)
@ -202,8 +210,13 @@ def main():
    user = module.params["login_user"]
    password = module.params["login_password"]
    host = module.params["login_host"]
+   port = module.params["login_port"]
    mysqlvar = module.params["variable"]
    value = module.params["value"]
+   if mysqlvar is None:
+       module.fail_json(msg="Cannot run without variable to operate with")
+   if match('^[0-9a-z_]+$', mysqlvar) is None:
+       module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
    if not mysqldb_found:
        module.fail_json(msg="the python mysqldb module is required")
    else:
@ -226,23 +239,21 @@ def main():
            module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
    try:
        if module.params["login_unix_socket"]:
-           db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
+           db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
        else:
-           db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql")
+           db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
        cursor = db_connection.cursor()
    except Exception, e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
-   if mysqlvar is None:
-       module.fail_json(msg="Cannot run without variable to operate with")
    mysqlvar_val = getvariable(cursor, mysqlvar)
+   if mysqlvar_val is None:
+       module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
    if value is None:
        module.exit_json(msg=mysqlvar_val)
    else:
-       if len(mysqlvar_val) < 1:
-           module.fail_json(msg="Variable not available", changed=False)
        # Type values before using them
        value_wanted = typedvalue(value)
-       value_actual = typedvalue(mysqlvar_val[0][1])
+       value_actual = typedvalue(mysqlvar_val)
        if value_wanted == value_actual:
            module.exit_json(msg="Variable already set to requested value", changed=False)
        try:

Some files were not shown because too many files have changed in this diff