ec2_asg: max_instance_lifetime and respect wait on replace (#66863)

* ec2_asg: max_instance_lifetime and respect wait on replace

* ec2_asg: max_instance_lifetime integration tests

* ec2_asg: address review comments
Andrej Svenke 2020-02-15 13:56:39 +01:00 committed by GitHub
parent d2f4d305ee
commit f98874e4f9
3 changed files with 240 additions and 165 deletions


@ -0,0 +1,5 @@
minor_changes:
- 'ec2_asg: Migrated to AnsibleAWSModule'
- 'ec2_asg: Add support for Max Instance Lifetime'
bugfixes:
- 'ec2_asg: Ensure "wait" is honored during replace operations'
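
Taken together, the two user-facing changes combine roughly as in the hedged sketch below. This task is not part of the commit; the group and launch configuration names (app-asg, app-lc-v2) are placeholders and the values are illustrative only.

- name: rolling replace onto a new launch configuration, with a 7-day instance lifetime
  ec2_asg:
    name: app-asg                    # placeholder name
    launch_config_name: app-lc-v2    # placeholder name
    min_size: 3
    max_size: 6
    desired_capacity: 3
    replace_all_instances: true      # exercises the rolling-replace path
    wait_for_instances: true         # the bugfix: "wait" is now honored during replace
    wait_timeout: 600
    max_instance_lifetime: 604800    # new option; a value of 0 removes the restriction
  register: asg_result

With the fix, setting wait_for_instances to false now actually skips the viability, ELB, and target-group waits during the replace instead of being ignored.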


@ -81,6 +81,13 @@ options:
description:
- Maximum number of instances in group; if unspecified, the current group value will be used.
type: int
max_instance_lifetime:
description:
- The maximum amount of time, in seconds, that an instance can be in service.
- Maximum instance lifetime must be 0, a value between 604800 and 31536000 seconds (inclusive), or left unspecified.
- A value of 0 removes the lifetime restriction.
version_added: "2.10"
type: int
mixed_instances_policy:
description:
- A mixed instance policy to use for the ASG.
@ -365,7 +372,6 @@ EXAMPLES = '''
tags:
- environment: production
propagate_at_launch: no
'''
RETURN = '''
@ -452,6 +458,11 @@ load_balancers:
returned: success
type: list
sample: ["elb-webapp-prod"]
max_instance_lifetime:
description: The maximum amount of time, in seconds, that an instance can be in service.
returned: success
type: int
sample: 604800
max_size:
description: Maximum size of group
returned: success
@ -511,7 +522,7 @@ target_group_names:
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
type: list
sample: ["Default"]
unhealthy_instances:
description: Number of instances in an unhealthy state
@ -544,8 +555,11 @@ import time
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (
AWSRetry,
camel_dict_to_snake_dict
)
try:
import botocore
@ -556,8 +570,9 @@ from ansible.module_utils.aws.core import AnsibleAWSModule
ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
'LoadBalancerNames', 'MaxSize', 'MinSize', 'AutoScalingGroupName', 'PlacementGroup',
'TerminationPolicies', 'VPCZoneIdentifier')
'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
'VPCZoneIdentifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
@ -693,37 +708,37 @@ def enforce_required_arguments_for_create():
def get_properties(autoscaling_group):
properties = dict()
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
properties = dict(
healthy_instances=0,
in_service_instances=0,
unhealthy_instances=0,
pending_instances=0,
viable_instances=0,
terminating_instances=0
)
instance_facts = dict()
autoscaling_group_instances = autoscaling_group.get('Instances')
if autoscaling_group_instances:
if autoscaling_group_instances:
properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
for i in autoscaling_group_instances:
if i.get('LaunchConfigurationName'):
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState'],
'launch_config_name': i['LaunchConfigurationName']}
elif i.get('LaunchTemplate'):
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState'],
'launch_template': i['LaunchTemplate']}
else:
instance_facts[i['InstanceId']] = {'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState']}
instance_facts[i['InstanceId']] = {
'health_status': i['HealthStatus'],
'lifecycle_state': i['LifecycleState']
}
if 'LaunchConfigurationName' in i:
instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
elif 'LaunchTemplate' in i:
instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
properties['viable_instances'] += 1
if i['HealthStatus'] == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i['LifecycleState'] == 'InService':
properties['in_service_instances'] += 1
if i['LifecycleState'] == 'Terminating':
@ -739,11 +754,12 @@ def get_properties(autoscaling_group):
properties['created_time'] = autoscaling_group.get('CreatedTime')
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
if autoscaling_group.get('LaunchConfigurationName'):
if 'LaunchConfigurationName' in autoscaling_group:
properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
else:
properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
properties['tags'] = autoscaling_group.get('Tags')
properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
properties['min_size'] = autoscaling_group.get('MinSize')
properties['max_size'] = autoscaling_group.get('MaxSize')
properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
@ -764,19 +780,19 @@ def get_properties(autoscaling_group):
properties['metrics_collection'] = metrics
if properties['target_group_arns']:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
elbv2_connection = module.client('elbv2')
tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
tg_result = tg_paginator.paginate(TargetGroupArns=properties['target_group_arns']).build_full_result()
tg_result = tg_paginator.paginate(
TargetGroupArns=properties['target_group_arns']
).build_full_result()
target_groups = tg_result['TargetGroups']
else:
target_groups = []
properties['target_group_names'] = [tg['TargetGroupName'] for tg in target_groups]
properties['target_group_names'] = [
tg['TargetGroupName']
for tg in target_groups
]
return properties
@ -822,17 +838,11 @@ def get_launch_object(connection, ec2_connection):
def elb_dreg(asg_connection, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
wait_timeout = module.params.get('wait_timeout')
count = 1
if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
elb_connection = module.client('elb')
else:
return
@ -925,7 +935,6 @@ def tg_healthy(asg_connection, elbv2_connection, group_name):
def wait_for_elb(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
@ -934,12 +943,7 @@ def wait_for_elb(asg_connection, group_name):
if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
module.debug("Waiting for ELB to consider instances healthy.")
elb_connection = boto3_conn(module,
conn_type='client',
resource='elb',
region=region,
endpoint=ec2_url,
**aws_connect_params)
elb_connection = module.client('elb')
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
@ -955,7 +959,6 @@ def wait_for_elb(asg_connection, group_name):
def wait_for_target_group(asg_connection, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
@ -964,12 +967,7 @@ def wait_for_target_group(asg_connection, group_name):
if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
module.debug("Waiting for Target Group to consider instances healthy.")
elbv2_connection = boto3_conn(module,
conn_type='client',
resource='elbv2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
elbv2_connection = module.client('elbv2')
wait_timeout = time.time() + wait_timeout
healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
@ -1016,6 +1014,7 @@ def create_autoscaling_group(connection):
mixed_instances_policy = module.params.get('mixed_instances_policy')
min_size = module.params['min_size']
max_size = module.params['max_size']
max_instance_lifetime = module.params.get('max_instance_lifetime')
placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
@ -1031,19 +1030,14 @@ def create_autoscaling_group(connection):
metrics_collection = module.params.get('metrics_collection')
metrics_granularity = module.params.get('metrics_granularity')
metrics_list = module.params.get('metrics_list')
try:
as_groups = describe_autoscaling_groups(connection, group_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to describe auto scaling groups.",
exception=traceback.format_exc())
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
ec2_connection = boto3_conn(module,
conn_type='client',
resource='ec2',
region=region,
endpoint=ec2_url,
**aws_connect_params)
ec2_connection = module.client('ec2')
if vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
@ -1086,6 +1080,8 @@ def create_autoscaling_group(connection):
ag['LoadBalancerNames'] = load_balancers
if target_group_arns:
ag['TargetGroupARNs'] = target_group_arns
if max_instance_lifetime:
ag['MaxInstanceLifetime'] = max_instance_lifetime
launch_object = get_launch_object(connection, ec2_connection)
if 'LaunchConfigurationName' in launch_object:
@ -1283,6 +1279,9 @@ def create_autoscaling_group(connection):
ag['AvailabilityZones'] = availability_zones
if vpc_zone_identifier:
ag['VPCZoneIdentifier'] = vpc_zone_identifier
if max_instance_lifetime is not None:
ag['MaxInstanceLifetime'] = max_instance_lifetime
try:
update_asg(connection, **ag)
@ -1375,7 +1374,6 @@ def get_chunks(l, n):
def update_size(connection, group, max_size, min_size, dc):
module.debug("setting ASG sizes")
module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
updated_group = dict()
@ -1389,6 +1387,7 @@ def update_size(connection, group, max_size, min_size, dc):
def replace(connection):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
wait_for_instances = module.params.get('wait_for_instances')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
@ -1399,7 +1398,7 @@ def replace(connection):
lc_check = module.params.get('lc_check')
else:
lc_check = False
# Mirror above behaviour for Launch Templates
# Mirror above behavior for Launch Templates
launch_template = module.params.get('launch_template')
if launch_template:
lt_check = module.params.get('lt_check')
@ -1412,7 +1411,9 @@ def replace(connection):
if desired_capacity is None:
desired_capacity = as_group['DesiredCapacity']
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
if wait_for_instances:
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
if replace_all_instances:
@ -1437,7 +1438,7 @@ def replace(connection):
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
changed = True
return(changed, props)
return changed, props
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
@ -1446,7 +1447,7 @@ def replace(connection):
if not old_instances:
changed = False
return(changed, props)
return changed, props
# check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
@ -1459,36 +1460,42 @@ def replace(connection):
as_group = describe_autoscaling_groups(connection, group_name)[0]
update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
if wait_for_instances:
wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
module.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
wait_for_term_inst(connection, term_instances)
wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
as_group = describe_autoscaling_groups(connection, group_name)[0]
if wait_for_instances:
wait_for_term_inst(connection, term_instances)
wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, group_name)
wait_for_target_group(connection, group_name)
if break_early:
module.debug("breaking loop")
break
update_size(connection, as_group, max_size, min_size, desired_capacity)
as_group = describe_autoscaling_groups(connection, group_name)[0]
asg_properties = get_properties(as_group)
module.debug("Rolling update complete.")
changed = True
return(changed, asg_properties)
return changed, asg_properties
def get_instances_by_launch_config(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
@ -1509,6 +1516,7 @@ def get_instances_by_launch_config(props, lc_check, initial_instances):
new_instances.append(i)
else:
old_instances.append(i)
module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
@ -1535,6 +1543,7 @@ def get_instances_by_launch_template(props, lt_check, initial_instances):
new_instances.append(i)
else:
old_instances.append(i)
module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
@ -1546,23 +1555,25 @@ def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initi
instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if module.params.get('launch_config_name'):
if 'launch_config_name' in module.params:
if lc_check:
for i in instances:
if 'launch_template' in props['instance_facts'][i]:
instances_to_terminate.append(i)
elif props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
if (
'launch_template' in props['instance_facts'][i]
or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
):
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
elif module.params.get('launch_template'):
elif 'launch_template' in module.params:
if lt_check:
for i in instances:
if 'launch_config_name' in props['instance_facts'][i]:
instances_to_terminate.append(i)
elif props['instance_facts'][i]['launch_template'] != props['launch_template']:
if (
'launch_config_name' in props['instance_facts'][i]
or props['instance_facts'][i]['launch_template'] != props['launch_template']
):
instances_to_terminate.append(i)
else:
for i in instances:
@ -1666,7 +1677,6 @@ def wait_for_term_inst(connection, term_instances):
def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group)
@ -1692,56 +1702,66 @@ def asg_exists(connection):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
launch_template=dict(type='dict',
default=None,
options=dict(
version=dict(type='str'),
launch_template_name=dict(type='str'),
launch_template_id=dict(type='str'),
),
),
mixed_instances_policy=dict(type='dict',
default=None,
options=dict(
instance_types=dict(type='list', elements='str'),
)),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
lt_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
argument_spec = dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
launch_template=dict(
type='dict',
default=None,
options=dict(
version=dict(type='str'),
launch_template_name=dict(type='str'),
launch_template_id=dict(type='str'),
)
),
min_size=dict(type='int'),
max_size=dict(type='int'),
max_instance_lifetime=dict(type='int'),
mixed_instances_policy=dict(
type='dict',
default=None,
options=dict(
instance_types=dict(
type='list',
elements='str'
),
)
),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
lt_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(
type='list',
default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[]),
metrics_collection=dict(type='bool', default=False),
metrics_granularity=dict(type='str', default='1Minute'),
metrics_list=dict(type='list', default=[
]
),
suspend_processes=dict(type='list', default=[]),
metrics_collection=dict(type='bool', default=False),
metrics_granularity=dict(type='str', default='1Minute'),
metrics_list=dict(
type='list',
default=[
'GroupMinSize',
'GroupMaxSize',
'GroupDesiredCapacity',
@ -1750,8 +1770,8 @@ def main():
'GroupStandbyInstances',
'GroupTerminatingInstances',
'GroupTotalInstances'
])
),
]
)
)
global module
@ -1759,26 +1779,31 @@ def main():
argument_spec=argument_spec,
mutually_exclusive=[
['replace_all_instances', 'replace_instances'],
['launch_config_name', 'launch_template']]
['launch_config_name', 'launch_template']
]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
if (
module.params.get('max_instance_lifetime') is not None
and not module.botocore_at_least('1.13.21')
):
module.fail_json(
msg='Botocore needs to be version 1.13.21 or higher to use max_instance_lifetime.'
)
if module.params.get('mixed_instance_type') and not module.botocore_at_least('1.12.45'):
module.fail_json(msg="mixed_instance_type is only supported with botocore >= 1.12.45")
if (
module.params.get('mixed_instances_policy') is not None
and not module.botocore_at_least('1.12.45')
):
module.fail_json(
msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.'
)
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module,
conn_type='client',
resource='autoscaling',
region=region,
endpoint=ec2_url,
**aws_connect_params)
connection = module.client('autoscaling')
changed = create_changed = replace_changed = False
exists = asg_exists(connection)
@ -1789,10 +1814,16 @@ def main():
module.exit_json(changed=changed)
# Only replace instances if asg existed at start of call
if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
if (
exists
and (replace_all_instances or replace_instances)
and (module.params.get('launch_config_name') or module.params.get('launch_template'))
):
replace_changed, asg_properties = replace(connection)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)


@ -95,7 +95,7 @@
# was created
set_fact:
load_balancer_name: "{{ item }}-lb"
with_items: "{{ resource_prefix | regex_findall('.{8}$') }}"
loop: "{{ resource_prefix | regex_findall('.{8}$') }}"
# Set up the testing dependencies: VPC, subnet, security group, and two launch configurations
@ -164,7 +164,7 @@
- "service httpd start"
security_groups: "{{ sg.group_id }}"
instance_type: t3.micro
with_items:
loop:
- "{{ resource_prefix }}-lc"
- "{{ resource_prefix }}-lc-2"
@ -314,6 +314,10 @@
name: "{{ resource_prefix }}-asg"
state: absent
wait_timeout: 800
register: output
retries: 3
until: output is succeeded
delay: 10
async: 400
# ============================================================
@ -411,6 +415,43 @@
# ============================================================
# Test max_instance_lifetime option
- name: enable asg max_instance_lifetime
ec2_asg:
name: "{{ resource_prefix }}-asg"
max_instance_lifetime: 604801
register: output
- name: ensure max_instance_lifetime is set
assert:
that:
- output.max_instance_lifetime == 604801
- name: run without max_instance_lifetime
ec2_asg:
name: "{{ resource_prefix }}-asg"
launch_config_name: "{{ resource_prefix }}-lc"
- name: ensure max_instance_lifetime not affected by defaults
assert:
that:
- output.max_instance_lifetime == 604801
- name: disable asg max_instance_lifetime
ec2_asg:
name: "{{ resource_prefix }}-asg"
launch_config_name: "{{ resource_prefix }}-lc"
max_instance_lifetime: 0
register: output
- name: ensure max_instance_lifetime is not set
assert:
that:
- not output.max_instance_lifetime
# ============================================================
# # perform rolling replace with different launch configuration
- name: perform rolling update to new AMI
@ -434,7 +475,7 @@
- assert:
that:
- "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
with_dict: "{{ output.instance_facts }}"
loop: "{{ output.instance_facts | dict2items }}"
# assert they are all healthy and that the rolling update resulted in the appropriate number of instances
- assert:
@ -466,7 +507,7 @@
- assert:
that:
- "item.value.launch_config_name == '{{ resource_prefix }}-lc'"
with_dict: "{{ output.instance_facts }}"
loop: "{{ output.instance_facts | dict2items }}"
# assert they are all healthy and that the rolling update resulted in the appropriate number of instances
# there should be the same number of instances as there were before the rolling update was performed
@ -502,22 +543,21 @@
poll: 0
register: asg_job
- name: get ec2_asg facts for 3 minutes
- name: get ec2_asg info for 3 minutes
ec2_asg_info:
name: "{{ resource_prefix }}-asg"
register: output
loop_control:
pause: 15
with_sequence: count=12
- set_fact:
inst_id_json_query: 'results[*].results[*].instances[*].instance_id'
loop: "{{ range(12) | list }}"
# Since we started with 3 servers and replaced all of them,
# we should see 6 servers total.
- assert:
that:
- "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 6"
- output | json_query(inst_id_json_query) | unique | length == 6
vars:
inst_id_json_query: results[].results[].instances[].instance_id
- name: Ensure ec2_asg task completes
async_status: jid="{{ asg_job.ansible_job_id }}"
@ -568,16 +608,15 @@
register: output
loop_control:
pause: 15
with_sequence: count=12
- set_fact:
inst_id_json_query: 'results[*].results[*].instances[*].instance_id'
loop: "{{ range(12) | list }}"
# Get all instance_ids we saw and assert we saw the expected number
# Should only see 3 (don't replace instances we just created)
- assert:
that:
- "lookup('flattened',output|json_query(inst_id_json_query)).split(',')|unique|length == 3"
- output | json_query(inst_id_json_query) | unique | length == 3
vars:
inst_id_json_query: results[].results[].instances[].instance_id
- name: Ensure ec2_asg task completes
async_status: jid="{{ asg_job.ansible_job_id }}"
@ -673,7 +712,7 @@
until: removed is not failed
ignore_errors: yes
retries: 10
with_items:
loop:
- "{{ resource_prefix }}-lc"
- "{{ resource_prefix }}-lc-2"