Don't raise or catch StandardError in amazon modules

parent 579e444243
commit 6cb1606005

9 changed files with 104 additions and 114 deletions
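
The changes below follow a single pattern: connection code stops catching Python 2's StandardError, which does not exist under Python 3 and also swallowed unrelated failures, and instead catches AnsibleAWSError, the exception connect_to_aws() in ansible.module_utils.ec2 raises for AWS-level problems. The one custom exception that inherited from StandardError now inherits from Exception, and each module ends with an if __name__ == '__main__': guard around its final main() call. A minimal sketch of the resulting connection pattern, written in the Python 2 except-clause syntax these modules use; the connect() helper name is illustrative and not taken from any of the changed files:

import boto.ec2

from ansible.module_utils.basic import *   # AnsibleModule
from ansible.module_utils.ec2 import *     # connect_to_aws, get_aws_connection_info, AnsibleAWSError


def connect(module):
    # Resolve region and credentials from module parameters and the environment.
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")
    try:
        # connect_to_aws raises AnsibleAWSError for AWS-level problems such as
        # an unknown region, so only auth/AWS failures are caught here; any
        # other exception propagates instead of being silently converted.
        return connect_to_aws(boto.ec2, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
        module.fail_json(msg=str(e))
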
@@ -154,9 +154,9 @@ EXAMPLES = '''

# Rolling ASG Updates

Below is an example of how to assign a new launch config to an ASG and terminate old instances.

All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".

This could also be considered a rolling deploy of a pre-baked AMI.
@@ -283,7 +283,6 @@ def get_properties(autoscaling_group):
    if getattr(autoscaling_group, "tags", None):
        properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
-

    return properties

def elb_dreg(asg_connection, module, group_name, instance_id):
@@ -300,7 +299,6 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
    else:
        return

-    exists = True
    for lb in as_group.load_balancers:
        elb_connection.deregister_instances(lb, instance_id)
        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
@@ -317,10 +315,8 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
        time.sleep(10)

    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
-
-


def elb_healthy(asg_connection, elb_connection, module, group_name):
@@ -339,7 +335,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
        # but has not yet show up in the ELB
        try:
            lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
-        except boto.exception.InvalidInstance, e:
+        except boto.exception.InvalidInstance:
            pass
        for i in lb_instances:
            if i.state == "InService":
@@ -348,7 +344,6 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
    return len(healthy_instances)

-

def wait_for_elb(asg_connection, module, group_name):
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    wait_timeout = module.params.get('wait_timeout')
@@ -372,7 +367,7 @@ def wait_for_elb(asg_connection, module, group_name):
            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))

@@ -398,7 +393,7 @@ def create_autoscaling_group(connection, module):
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            module.fail_json(msg=str(e))
    elif vpc_zone_identifier:
        vpc_zone_identifier = ','.join(vpc_zone_identifier)
@@ -435,7 +430,7 @@ def create_autoscaling_group(connection, module):

        try:
            connection.create_auto_scaling_group(ag)
-            if wait_for_instances == True:
+            if wait_for_instances:
                wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                wait_for_elb(connection, module, group_name)
            as_group = connection.get_all_groups(names=[group_name])[0]
@@ -477,7 +472,7 @@ def create_autoscaling_group(connection, module):
        dead_tags = []
        for tag in as_group.tags:
            have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
-            if not tag.key in want_tags:
+            if tag.key not in want_tags:
                changed = True
                dead_tags.append(tag)

@@ -494,14 +489,13 @@ def create_autoscaling_group(connection, module):
            changed = True
            as_group.load_balancers = module.params.get('load_balancers')
-

        if changed:
            try:
                as_group.update()
            except BotoServerError, e:
                module.fail_json(msg=str(e))

-        if wait_for_instances == True:
+        if wait_for_instances:
            wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            wait_for_elb(connection, module, group_name)
        try:
@@ -527,7 +521,7 @@ def delete_autoscaling_group(connection, module):
            if tmp_groups:
                tmp_group = tmp_groups[0]
                if not tmp_group.instances:
                    instances = False
            time.sleep(10)

        group.delete()
@@ -582,15 +576,15 @@ def replace(connection, module):
        changed = True
        return(changed, props)

    # we don't want to spin up extra instances if not necessary
    if num_new_inst_needed < batch_size:
        log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
        batch_size = num_new_inst_needed

    if not old_instances:
        changed = False
        return(changed, props)

    #check if min_size/max_size/desired capacity have been specified and if not use ASG values
    if min_size is None:
        min_size = as_group.min_size
@@ -639,7 +633,7 @@ def get_instances_by_lc(props, lc_check, initial_instances):
                new_instances.append(i)
            else:
                old_instances.append(i)

    else:
        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
        for i in props['instances']:
@@ -661,10 +655,10 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc
    # and they have a non-current launch config
    if lc_check:
        for i in instances:
            if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
                instances_to_terminate.append(i)
    else:
        for i in instances:
            if i in initial_instances:
                instances_to_terminate.append(i)
    return instances_to_terminate
@@ -678,7 +672,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
    lc_check = module.params.get('lc_check')
    decrement_capacity = False
    break_loop = False

    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    desired_size = as_group.min_size
@@ -722,7 +716,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
        elb_dreg(connection, module, group_name, instance_id)
        log.debug("terminating instance: {0}".format(instance_id))
        connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)

    # we wait to make sure the machines we marked as Unhealthy are
    # no longer in the list

@@ -758,7 +752,7 @@ def wait_for_term_inst(connection, module, term_instances):
            # waiting took too long
            module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())


def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):

    # make sure we have the latest stats after that last loop.
@@ -804,9 +798,9 @@ def main():
            termination_policies=dict(type='list', default='Default')
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['replace_all_instances', 'replace_instances']]
    )

@@ -828,13 +822,13 @@ def main():
    if state == 'present':
        create_changed, asg_properties=create_autoscaling_group(connection, module)
    elif state == 'absent':
        changed = delete_autoscaling_group(connection, module)
        module.exit_json( changed = changed )
    if replace_all_instances or replace_instances:
        replace_changed, asg_properties=replace(connection, module)
    if create_changed or replace_changed:
        changed = True
    module.exit_json( changed = changed, **asg_properties )

-main()
+if __name__ == '__main__':
+    main()

@@ -260,7 +260,7 @@ class ElbManager:

        try:
            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            self.module.fail_json(msg=str(e))

        elbs = []
@@ -293,7 +293,7 @@ class ElbManager:

        try:
            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            self.module.fail_json(msg=str(e))

        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
@@ -317,7 +317,7 @@ class ElbManager:
        """Returns a boto.ec2.InstanceObject for self.instance_id"""
        try:
            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            self.module.fail_json(msg=str(e))
        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]

@@ -377,4 +377,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()

@@ -489,7 +489,7 @@ class ElbManager(object):
        try:
            return connect_to_aws(boto.ec2.elb, self.region,
                                  **self.aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            self.module.fail_json(msg=str(e))

    def _delete_elb(self):
@@ -977,4 +977,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()

@@ -314,7 +314,7 @@ def main():

    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
-    except (boto.exception.NoAuthHandlerFound, StandardError), e:
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')

@@ -113,8 +113,6 @@ EXAMPLES = '''

'''

-import sys
-
try:
    import boto.ec2.cloudwatch
    from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
@@ -268,11 +266,11 @@ def main():
    state = module.params.get('state')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
@@ -286,4 +284,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()

@@ -176,7 +176,7 @@ def main():

    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
-    except (boto.exception.NoAuthHandlerFound, StandardError), e:
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
        module.fail_json(msg = str(e))

    if state == 'present':
@@ -185,4 +185,5 @@ def main():
        delete_scaling_policy(connection, module)


-main()
+if __name__ == '__main__':
+    main()

@@ -47,7 +47,7 @@ options:
  volume_type:
    description:
      - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
        and continues to remain the Ansible default for backwards compatibility.
    required: false
    default: standard
    version_added: "1.9"
@@ -69,7 +69,7 @@ options:
    default: null
  zone:
    description:
      - zone in which to create the volume, if unset uses the zone the instance is in (if set)
    required: false
    default: null
    aliases: ['aws_zone', 'ec2_zone']
@@ -87,7 +87,7 @@ options:
    choices: ["yes", "no"]
    version_added: "1.5"
  state:
    description:
      - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
    required: false
    default: present
@@ -99,15 +99,15 @@ extends_documentation_fragment: aws

EXAMPLES = '''
# Simple attachment action
- ec2_vol:
    instance: XXXXXX
    volume_size: 5
    device_name: sdd

# Example using custom iops params
- ec2_vol:
    instance: XXXXXX
    volume_size: 5
    iops: 100
    device_name: sdd

@@ -116,15 +116,15 @@ EXAMPLES = '''
    instance: XXXXXX
    snapshot: "{{ snapshot }}"

# Playbook example combined with instance launch
- ec2:
    keypair: "{{ keypair }}"
    image: "{{ image }}"
    wait: yes
    count: 3
    register: ec2
- ec2_vol:
    instance: "{{ item.id }} "
    volume_size: 5
    with_items: ec2.instances
    register: ec2_vol
@@ -221,7 +221,7 @@ def get_volume(module, ec2):
    return vols[0]

def get_volumes(module, ec2):

    instance = module.params.get('instance')

    try:
@@ -252,12 +252,10 @@ def boto_supports_volume_encryption():
    """
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')


def create_volume(module, ec2, zone):
    changed = False
    name = module.params.get('name')
-    id = module.params.get('id')
-    instance = module.params.get('instance')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
@@ -290,16 +288,16 @@ def create_volume(module, ec2, zone):


def attach_volume(module, ec2, volume, instance):

    device_name = module.params.get('device_name')
    changed = False

    # If device_name isn't set, make a choice based on best practices here:
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    # In future this needs to be more dynamic but combining block device mapping best practices
    # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)

    # Use password data attribute to tell whether the instance is Windows or Linux
    if device_name is None:
        try:
@@ -309,7 +307,7 @@ def attach_volume(module, ec2, volume, instance):
            device_name = '/dev/xvdf'
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    if volume.attachment_state() is not None:
        adata = volume.attach_data
        if adata.instance_id != instance.id:
@@ -328,9 +326,9 @@ def attach_volume(module, ec2, volume, instance):
    return volume, changed

def detach_volume(module, ec2, volume):

    changed = False

    if volume.attachment_state() is not None:
        adata = volume.attach_data
        volume.detach()
@@ -338,15 +336,15 @@ def detach_volume(module, ec2, volume):
        time.sleep(3)
        volume.update()
        changed = True

    return volume, changed

def get_volume_info(volume, state):

    # If we're just listing volumes then do nothing, else get the latest update for the volume
    if state != 'list':
        volume.update()

    volume_info = {}
    attachment = volume.attach_data

@@ -367,7 +365,7 @@ def get_volume_info(volume, state):
        },
        'tags': volume.tags
    }

    return volume_info

def main():
@@ -395,34 +393,32 @@ def main():
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
-    volume_type = module.params.get('volume_type')
-    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')

    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
@@ -469,11 +465,11 @@ def main():

    if volume_size and (id or snapshot):
        module.fail_json(msg="Cannot specify volume_size together with id or snapshot")

    if state == 'present':
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

@@ -487,4 +483,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()

@@ -91,9 +91,6 @@ EXAMPLES = '''

'''

-import time
-import sys
-
try:
    import boto
    import boto.ec2
@@ -134,15 +131,15 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
        module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                             'CIDR block you specified. If you would like to create '
                             'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))

    return matched_vpc


def update_vpc_tags(vpc, module, vpc_obj, tags, name):

    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
@@ -154,10 +151,10 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    except Exception, e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)


def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):

    if vpc_obj.dhcp_options_id != dhcp_id:
        connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
        return True
@@ -209,48 +206,47 @@ def main():
    tags=module.params.get('tags')
    state=module.params.get('state')
    multi=module.params.get('multi_ok')

    changed=False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if dns_hostnames and not dns_support:
        module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is None:
            try:
                vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError, e:
                module.fail_json(msg=e)
-

        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
@@ -261,21 +257,21 @@ def main():
            except BotoServerError, e:
                e_msg=boto_exception(e)
                module.fail_json(msg=e_msg)

        # get the vpc obj again in case it has changed
        try:
            vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
        except BotoServerError, e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

    elif state == 'absent':

        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_obj is not None:
            try:
                connection.delete_vpc(vpc_obj.id)
@@ -285,11 +281,12 @@ def main():
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)

        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()

@@ -116,7 +116,7 @@ except ImportError:

# returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group)

-class NotModifiableError(StandardError):
+class NotModifiableError(Exception):
    def __init__(self, error_message, *args):
        super(NotModifiableError, self).__init__(error_message, *args)
        self.error_message = error_message
@@ -179,7 +179,7 @@ def modify_group(group, params, immediate=False):
    new_params = dict(params)

    for key in new_params.keys():
-        if group.has_key(key):
+        if key in group:
            param = group[key]
            new_value = new_params[key]

@@ -285,7 +285,6 @@ def main():
            else:
                break

-
        except BotoServerError, e:
            module.fail_json(msg = e.error_message)

@@ -301,4 +300,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

-main()
+if __name__ == '__main__':
+    main()
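
Every module ending changes the same way in this commit. The sketch below is a generic illustration of why, not code from any of the changed files: with the guard in place, importing the module file, for example from a test harness or documentation tooling, no longer runs it as a side effect.

def main():
    # Placeholder for the module logic that the real files implement.
    print("module executed")


if __name__ == '__main__':
    # Run only when executed as a script, not when imported.
    main()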