[cloud] make ec2 module pep8 (#22421)
* making ec2 pep8
* remove ec2 from pep8 legacy files
* missed a couple
* fix imports and remove iteritems
* making group_id and group_name mutually exclusive and fixing whitespace
parent 9857ce8ddb
commit 3aef028d42
2 changed files with 135 additions and 120 deletions

@@ -37,7 +37,9 @@ options:
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []

@@ -57,7 +59,9 @@ options:
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
- The AWS region to use. Must be specified if ec2_url is not used.
If not specified then the value of the EC2_REGION environment variable, if any, is used.
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
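
For readers skimming the option change above, a minimal hedged sketch of the documented fallback behaviour (the function and variable names are illustrative, not part of the module):

import os

def resolve_region(explicit_region=None):
    # Mirror the documented lookup order: an explicitly supplied region wins,
    # otherwise fall back to the EC2_REGION environment variable (may be None).
    return explicit_region or os.environ.get('EC2_REGION')

print(resolve_region('us-east-1'))  # -> 'us-east-1'
print(resolve_region())             # -> value of EC2_REGION, or None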

@@ -77,7 +81,8 @@ options:
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC.
Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
choices: [ "default", "dedicated" ]

@@ -85,7 +90,8 @@ options:
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid.
When it is filled, the instance is started.
required: false
default: null
aliases: []

@@ -236,7 +242,10 @@ options:
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed
are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str),
encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type,
iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false
default: null
aliases: []
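
To make the reflowed key list above concrete, here is a hedged example of one entry of the volumes list written as a Python dict; the values are invented, volume_size comes from the module code further down in this diff rather than from the key list, and this is not an excerpt from the module's EXAMPLES section:

# One illustrative entry for the 'volumes' option.
volume = {
    'device_name': '/dev/sdb',      # required
    'volume_type': 'gp2',           # preferred over the deprecated device_type
    'volume_size': 10,              # the module requires a size when no snapshot/ephemeral is given
    'delete_on_termination': True,  # defaults to False per the description
}
# 'ephemeral' and 'snapshot' are mutually exclusive, and 'iops' would only be
# needed if volume_type were 'io1'.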

@@ -249,21 +258,26 @@ options:
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running.
This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
required: false
default: null
aliases: []
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.)
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are
for creating a new network interface at launch.)
required: false
default: null
aliases: ['network_interface']
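
As a rough illustration of the webserver example in the count_tag description above (a hedged sketch, not taken from the module's EXAMPLES section):

# Keep exactly 25 instances carrying the tag class=webserver running.
exact_count = 25
count_tag = {'class': 'webserver'}   # the tag must already exist or be passed via instance_tags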

@@ -601,9 +615,9 @@ EXAMPLES = '''

import time
from ast import literal_eval
from ansible.module_utils.six import iteritems
from ansible.module_utils.six import get_function_code

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect, connect_to_aws
from distutils.version import LooseVersion

try:

@@ -662,14 +676,14 @@ def get_reservations(module, ec2, tags=None, state=None, zone=None):
for x in tags:
if isinstance(x, dict):
x = _set_none_to_blank(x)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(x)))
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
else:
filters.update({"tag-key": x})

# if dict, add the key and value to the filter
if isinstance(tags, dict):
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(tags)))
filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))

if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
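
To show the filter-building pattern in isolation, here is a hedged standalone sketch of how a list or dict of tags becomes EC2 filters; tags_to_filters is an invented name, the _set_none_to_blank normalisation is omitted, and plain dict.items() replaces six.iteritems() because it exists on both Python 2 and 3:

def tags_to_filters(tags):
    # Build an EC2 filter dict: bare tag names become 'tag-key' filters,
    # name/value pairs become 'tag:<name>' filters.
    filters = {}
    if isinstance(tags, dict):
        filters.update(dict(("tag:" + name, value) for name, value in tags.items()))
    elif isinstance(tags, list):
        for entry in tags:
            if isinstance(entry, dict):
                filters.update(dict(("tag:" + name, value) for name, value in entry.items()))
            else:
                filters.update({"tag-key": entry})
    return filters

print(tags_to_filters({'env': 'prod'}))            # {'tag:env': 'prod'}
print(tags_to_filters(['Name', {'env': 'prod'}]))  # {'tag-key': 'Name', 'tag:env': 'prod'}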

@@ -682,6 +696,7 @@ def get_reservations(module, ec2, tags=None, state=None, zone=None):

return results


def get_instance_info(inst):
"""
Retrieves instance information from an instance

@@ -712,7 +727,7 @@ def get_instance_info(inst):
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None

@@ -741,6 +756,7 @@ def get_instance_info(inst):

return instance_info


def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification

@@ -759,6 +775,7 @@ def boto_supports_associate_public_ip_address(ec2):
except AttributeError:
return False


def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0

@@ -771,6 +788,7 @@ def boto_supports_profile_name_arg(ec2):
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames


def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
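
The unchanged return statement above checks a method's code object for a parameter name; a hedged standalone sketch of the same idea without boto or the six helper (accepts_argument and run_instances are invented for the example):

def accepts_argument(func, name):
    # Bound methods expose their code object via __func__; co_varnames lists
    # parameter names (and local variables, which is why the check is approximate).
    code = getattr(func, '__func__', func).__code__
    return name in code.co_varnames

def run_instances(image_id, instance_profile_name=None):
    pass

print(accepts_argument(run_instances, 'instance_profile_name'))  # True
print(accepts_argument(run_instances, 'placement_group'))        # False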

@@ -780,6 +798,7 @@ def boto_supports_volume_encryption():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')


def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
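
The one-liner above is a plain version gate; a hedged standalone sketch of the same pattern, with the 2.29.0 threshold taken from the docstring and the function name invented:

from distutils.version import LooseVersion

def supports_volume_encryption(boto_version):
    # EBS volume encryption support is documented above as arriving in boto 2.29.0.
    return LooseVersion(boto_version) >= LooseVersion('2.29.0')

print(supports_volume_encryption('2.28.0'))  # False
print(supports_volume_encryption('2.34.0'))  # True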

@@ -788,28 +807,28 @@ def create_block_device(module, ec2, volume):
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type','volume_type']):
module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
if all(key in volume for key in ['device_type', 'volume_type']):
module.fail_json(msg='device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')

# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')

if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set')
module.fail_json(msg='io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'encrypted' in volume:
module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
module.fail_json(msg='You can not set encryption when creating a volume from a snapshot')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
module.fail_json(msg='Cannot set both ephemeral and snapshot')
if boto_supports_volume_encryption():
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),

@@ -826,6 +845,7 @@ def create_block_device(module, ec2, volume):
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))


def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.

@@ -838,6 +858,7 @@ def boto_supports_param_in_spot_request(ec2, param):
method = getattr(ec2, 'request_spot_instances')
return param in get_function_code(method).co_varnames


def await_spot_requests(module, ec2, spot_requests, count):
"""
Wait for a group of spot requests to be fulfilled, or fail.

@@ -861,13 +882,13 @@ def await_spot_requests(module, ec2, spot_requests, count):
continue
for sir in reqs:
if sir.id != sirb.id:
continue # this is not our spot instance
continue  # this is not our spot instance
if sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
elif sir.state == 'open':
continue # still waiting, nothing to do here
continue  # still waiting, nothing to do here
elif sir.state == 'active':
continue # Instance is created already, nothing to do here
continue  # Instance is created already, nothing to do here
elif sir.state == 'failed':
module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
sir.id, sir.status.code, sir.fault.code, sir.fault.message))

@@ -892,7 +913,7 @@ def await_spot_requests(module, ec2, spot_requests, count):
time.sleep(5)
else:
return spot_req_inst_ids.values()
module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())


def enforce_count(module, ec2, vpc):
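
The timeout handling above follows a simple poll-until-deadline shape: keep checking, return as soon as the requests are fulfilled, and fail once the deadline passes. A hedged standalone sketch of that shape (poll_until and the sample data are invented):

import time

def poll_until(check, timeout=30, interval=5):
    # check() should return a truthy result once the awaited condition holds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = check()
        if result:
            return result
        time.sleep(interval)
    raise RuntimeError("wait timed out on %s" % time.asctime())

fulfilled = {'sir-1': 'i-aaa', 'sir-2': 'i-bbb'}   # pretend spot-request -> instance map
print(poll_until(lambda: len(fulfilled) >= 2 and fulfilled, timeout=10, interval=1))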

@@ -928,10 +949,10 @@ def enforce_count(module, ec2, vpc):
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([ x.id for x in instances ])
all_instance_ids = sorted([x.id for x in instances])
remove_ids = all_instance_ids[0:to_remove]

instances = [ x for x in instances if x.id not in remove_ids]
instances = [x for x in instances if x.id not in remove_ids]

(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)

@@ -1000,10 +1021,6 @@ def create_instances(module, ec2, vpc, override_count=None):
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')

# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))

vpc_id = None
if vpc_subnet_id:
if not vpc:
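
The hand-rolled group_id/group_name check removed above is covered by the mutually_exclusive declaration added to AnsibleModule near the end of this diff; a minimal hedged sketch of how such a declaration behaves (a trimmed two-parameter spec, not the module's real argument_spec):

from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(
        argument_spec=dict(
            group_name=dict(type='list'),
            group_id=dict(type='list'),
        ),
        # AnsibleModule fails the task itself when both parameters are supplied,
        # which is why the explicit fail_json() check above could be dropped.
        mutually_exclusive=[['group_name', 'group_id']],
    )
    module.exit_json(changed=False)

if __name__ == '__main__':
    main()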

@@ -1025,16 +1042,16 @@ def create_instances(module, ec2, vpc, override_count=None):
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
# wrap the group_id in a list if it's not one already
if isinstance(group_id, basestring):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))

# Lookup any instances that much our run id.

@@ -1042,7 +1059,7 @@ def create_instances(module, ec2, vpc, override_count=None):
count_remaining = int(count)

if id is not None:
filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
filter_dict = {'client-token': id, 'instance-state-name': 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:

@@ -1124,7 +1141,7 @@ def create_instances(module, ec2, vpc, override_count=None):
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg = 'Device name must be set for volume')
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:

@@ -1135,27 +1152,31 @@ def create_instances(module, ec2, vpc, override_count=None):
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
))
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
)
)
else:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
private_ip_address = private_ip,
))
params.update(
dict(
min_count=count_remaining,
max_count=count_remaining,
client_token=id,
placement_group=placement_group,
private_ip_address=private_ip,
)
)

# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'

res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
instids = [i.id for i in res.instances]
while True:
try:
ec2.get_all_instances(instids)

@@ -1165,7 +1186,7 @@ def create_instances(module, ec2, vpc, override_count=None):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))

# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete

@@ -1174,9 +1195,9 @@ def create_instances(module, ec2, vpc, override_count=None):
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")

else:
if private_ip:

@@ -1184,7 +1205,7 @@ def create_instances(module, ec2, vpc, override_count=None):
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group :
elif placement_group:
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")

@@ -1198,8 +1219,8 @@ def create_instances(module, ec2, vpc, override_count=None):
params['launch_group'] = spot_launch_group

params.update(dict(
count = count_remaining,
type = spot_type,
count=count_remaining,
type=spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)

@@ -1207,7 +1228,7 @@ def create_instances(module, ec2, vpc, override_count=None):
if wait:
instids = await_spot_requests(module, ec2, res, count)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))

# wait here until the instances are up
num_running = 0

@@ -1224,7 +1245,7 @@ def create_instances(module, ec2, vpc, override_count=None):

num_running = 0
for res in res_list:
num_running += len([ i for i in res.instances if i.state=='running' ])
num_running += len([i for i in res.instances if i.state == 'running'])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again

@@ -1237,9 +1258,9 @@ def create_instances(module, ec2, vpc, override_count=None):

if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())

#We do this after the loop ends so that we end up with one list
# We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)

@@ -1258,7 +1279,7 @@ def create_instances(module, ec2, vpc, override_count=None):
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))

instance_dict_array = []
created_instance_ids = []

@@ -1315,9 +1336,8 @@ def terminate_instances(module, ec2, instance_ids):
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
filters={'instance-state-name': 'terminated'})
try:
num_terminated = sum([len(res.instances) for res in response])
except Exception as e:

@@ -1331,15 +1351,13 @@ def terminate_instances(module, ec2, instance_ids):

# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
#Lets get the current state of the instances after terminating - issue600
module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
# Lets get the current state of the instances after terminating - issue600
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids,\
filters={'instance-state-name':'terminated'}):
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))


return (changed, instance_dict_array, terminated_instance_ids)


@@ -1385,7 +1403,7 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
for key, value in instance_tags.items():
filters["tag:" + key] = value

# Check that our instances are not in the state we want to take
# Check that our instances are not in the state we want to take

# Check (and eventually change) instances attributes and instances state
existing_instances_array = []

@@ -1411,8 +1429,7 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
exception=traceback.format_exc())

# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
and termination_protection is not None):
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True

@@ -1430,7 +1447,7 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
existing_instances_array.append(inst.id)

instance_ids = list(set(existing_instances_array + (instance_ids or [])))
## Wait for all the instances to finish starting or stopping
# Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []

@@ -1447,10 +1464,11 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):

if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())

return (changed, instance_dict_array, instance_ids)


def restart_instances(module, ec2, instance_ids, state, instance_tags):
"""
Restarts a list of existing instances

@@ -1493,7 +1511,7 @@ def restart_instances(module, ec2, instance_ids, state, instance_tags):
for key, value in instance_tags.items():
filters["tag:" + key] = value

# Check that our instances are not in the state we want to take
# Check that our instances are not in the state we want to take

# Check (and eventually change) instances attributes and instances state
for res in ec2.get_all_instances(instance_ids, filters=filters):

@@ -1518,8 +1536,7 @@ def restart_instances(module, ec2, instance_ids, state, instance_tags):
exception=traceback.format_exc())

# Check "termination_protection" attribute
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
and termination_protection is not None):
if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True

@@ -1537,48 +1554,50 @@ def restart_instances(module, ec2, instance_ids, state, instance_tags):

def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
key_name = dict(aliases = ['keypair']),
id = dict(),
group = dict(type='list', aliases=['groups']),
group_id = dict(type='list'),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group = dict(),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
spot_wait_timeout = dict(default=600),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(type='dict'),
vpc_subnet_id = dict(),
assign_public_ip = dict(type='bool', default=False),
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list', aliases=['instance_id']),
source_dest_check = dict(type='bool', default=True),
termination_protection = dict(type='bool', default=None),
state = dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
network_interfaces = dict(type='list', aliases=['network_interface'])
)
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default='1'),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=300),
spot_wait_timeout=dict(default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool', default=False),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=True),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default'),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)

module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
mutually_exclusive=[
['group_name', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],

@@ -1601,7 +1620,7 @@ def main():
try:
vpc = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
module.fail_json(msg=str(e))
else:
vpc = None

@@ -1644,9 +1663,6 @@ def main():

module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
main()

@@ -20,7 +20,6 @@ lib/ansible/modules/cloud/amazon/cloudformation_facts.py
lib/ansible/modules/cloud/amazon/cloudtrail.py
lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
lib/ansible/modules/cloud/amazon/dynamodb_table.py
lib/ansible/modules/cloud/amazon/ec2.py
lib/ansible/modules/cloud/amazon/ec2_ami.py
lib/ansible/modules/cloud/amazon/ec2_ami_find.py
lib/ansible/modules/cloud/amazon/ec2_asg.py