#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
  - Can create or delete AWS Autoscaling Groups
  - Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
  state:
    description:
      - Create or delete the group
    required: true
    choices: ['present', 'absent']
  name:
    description:
      - Unique name for group to be created or deleted
    required: true
  load_balancers:
    description:
      - List of ELB names to use for the group
    required: false
  availability_zones:
    description:
      - List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
    required: false
  launch_config_name:
    description:
      - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
    required: true
  min_size:
    description:
      - Minimum number of instances in group
    required: false
  max_size:
    description:
      - Maximum number of instances in group
    required: false
  desired_capacity:
    description:
      - Desired number of instances in group
    required: false
  replace_all_instances:
    description:
      - In a rolling fashion, replace all instances that use the old launch configuration with instances from the current launch configuration.
    required: false
    version_added: "1.8"
    default: False
  replace_batch_size:
    description:
      - Number of instances you'd like to replace at a time. Used with replace_all_instances.
    required: false
    version_added: "1.8"
    default: 1
  replace_instances:
    description:
      - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
    required: false
    version_added: "1.8"
    default: None
  lc_check:
    description:
      - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
    required: false
    version_added: "1.8"
    default: True
  vpc_zone_identifier:
    description:
      - List of VPC subnets to use
    required: false
    default: None
  tags:
    description:
      - A list of tags to add to the Auto Scaling Group. Optional key is 'propagate_at_launch', which defaults to true.
    required: false
    default: None
    version_added: "1.7"
  health_check_period:
    description:
      - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
    required: false
    default: 300 seconds
    version_added: "1.7"
  health_check_type:
    description:
      - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
    required: false
    default: EC2
    version_added: "1.7"
    choices: ['EC2', 'ELB']
  default_cooldown:
    description:
      - The number of seconds after a scaling activity completes before another can begin.
    required: false
    default: 300 seconds
    version_added: "2.0"
  wait_timeout:
    description:
      - How long to wait for instances to become viable when replaced. Used in conjunction with the replace_instances option.
    default: 300
    version_added: "1.8"
  wait_for_instances:
    description:
      - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
    version_added: "1.9"
    default: yes
    required: False
  termination_policies:
    description:
      - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
      - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
    required: false
    default: Default
    choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
    version_added: "2.0"
extends_documentation_fragment:
    - aws
    - ec2
"""

EXAMPLES = '''
# Basic configuration

- ec2_asg:
    name: special
    load_balancers: [ 'lb1', 'lb2' ]
    availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
    launch_config_name: 'lc-1'
    min_size: 1
    max_size: 10
    desired_capacity: 5
    vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
    tags:
      - environment: production
        propagate_at_launch: no

# Rolling ASG Updates

Below is an example of how to assign a new launch config to an ASG and terminate old instances.

All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".

This could also be considered a rolling deploy of a pre-baked AMI.

If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.

- name: create launch config
  ec2_lc:
    name: my_new_lc
    image_id: ami-lkajsf
    key_name: mykey
    region: us-east-1
    security_groups: sg-23423
    instance_type: m1.small
    assign_public_ip: yes

- ec2_asg:
    name: myasg
    launch_config_name: my_new_lc
    health_check_period: 60
    health_check_type: ELB
    replace_all_instances: yes
    min_size: 5
    max_size: 5
    desired_capacity: 5
    region: us-east-1

To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":

- ec2_asg:
    name: myasg
    launch_config_name: my_new_lc
    health_check_period: 60
    health_check_type: ELB
    replace_instances:
    - i-b345231
    - i-24c2931
    min_size: 5
    max_size: 5
    desired_capacity: 5
    region: us-east-1
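
# Removing an ASG

The module can also delete a group. Setting "state" to absent scales the group
down to zero and removes it once its instances have terminated. A minimal sketch,
reusing the group name from the examples above:

- ec2_asg:
    name: myasg
    state: absent
    region: us-east-1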
'''

import time
import logging as log

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')

try:
    import boto.ec2.autoscale
    # boto.ec2.elb is used below for ELB (de)registration and health checks
    import boto.ec2.elb
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
                  'health_check_period', 'health_check_type', 'launch_config_name',
                  'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
                  'termination_policies', 'vpc_zone_identifier')

INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')

def enforce_required_arguments(module):
    ''' As many arguments are not required for autoscale group deletion
        they cannot be mandatory arguments for the module, so we enforce
        them here '''
    missing_args = []
    for arg in ('min_size', 'max_size', 'launch_config_name'):
        if module.params[arg] is None:
            missing_args.append(arg)
    if missing_args:
        module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))

def get_properties(autoscaling_group):
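    # Flatten the boto AutoScalingGroup object into a plain dict: the module's
    # return value must be JSON-serializable, and later steps rely on the
    # per-lifecycle-state instance counters collected here.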
    properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)

    # Ugly hack to make this JSON-serializable. We take a list of boto Tag
    # objects and replace them with a dict-representation. Needed because the
    # tags are included in ansible's return value (which is jsonified)
    if 'tags' in properties and isinstance(properties['tags'], list):
        serializable_tags = {}
        for tag in properties['tags']:
            serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
        properties['tags'] = serializable_tags

    properties['healthy_instances'] = 0
    properties['in_service_instances'] = 0
    properties['unhealthy_instances'] = 0
    properties['pending_instances'] = 0
    properties['viable_instances'] = 0
    properties['terminating_instances'] = 0

    instance_facts = {}

    if autoscaling_group.instances:
        properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
        for i in autoscaling_group.instances:
            instance_facts[i.instance_id] = {'health_status': i.health_status,
                                             'lifecycle_state': i.lifecycle_state,
                                             'launch_config_name': i.launch_config_name}
            if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
                properties['viable_instances'] += 1
            if i.health_status == 'Healthy':
                properties['healthy_instances'] += 1
            else:
                properties['unhealthy_instances'] += 1
            if i.lifecycle_state == 'InService':
                properties['in_service_instances'] += 1
            if i.lifecycle_state == 'Terminating':
                properties['terminating_instances'] += 1
            if i.lifecycle_state == 'Pending':
                properties['pending_instances'] += 1
    properties['instance_facts'] = instance_facts
    properties['load_balancers'] = autoscaling_group.load_balancers

    if getattr(autoscaling_group, "tags", None):
        properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)

    return properties

def elb_dreg(asg_connection, module, group_name, instance_id):
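    # Deregister the instance from every ELB attached to the group, then poll the
    # ELBs until the instance is no longer reported InService (or wait_timeout passes).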
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    as_group = asg_connection.get_all_groups(names=[group_name])[0]
    wait_timeout = module.params.get('wait_timeout')
    props = get_properties(as_group)
    count = 1
    if as_group.load_balancers and as_group.health_check_type == 'ELB':
        try:
            elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))
    else:
        return

    exists = True
    for lb in as_group.load_balancers:
        elb_connection.deregister_instances(lb, instance_id)
        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))

    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        count = 0
        for lb in as_group.load_balancers:
            lb_instances = elb_connection.describe_instance_health(lb)
            for i in lb_instances:
                if i.instance_id == instance_id and i.state == "InService":
                    count += 1
                    log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
        time.sleep(10)

    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))


def elb_healthy(asg_connection, elb_connection, module, group_name):
    healthy_instances = []
    as_group = asg_connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    # get healthy, inservice instances from ASG
    instances = []
    for instance, settings in props['instance_facts'].items():
        if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
            instances.append(instance)
    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
    log.debug("ELB instance status:")
    for lb in as_group.load_balancers:
        # we catch a race condition that sometimes happens if the instance exists in the ASG
        # but has not yet shown up in the ELB
        try:
            lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
        except boto.exception.InvalidInstance, e:
            pass
        for i in lb_instances:
            if i.state == "InService":
                healthy_instances.append(i.instance_id)
                log.debug("{0}: {1}".format(i.instance_id, i.state))
    return len(healthy_instances)


def wait_for_elb(asg_connection, module, group_name):
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    wait_timeout = module.params.get('wait_timeout')

    # if the health_check_type is ELB, we want to query the ELBs directly for instance
    # status as to avoid health_check_grace period that is awarded to ASG instances
    as_group = asg_connection.get_all_groups(names=[group_name])[0]

    if as_group.load_balancers and as_group.health_check_type == 'ELB':
        log.debug("Waiting for ELB to consider instances healthy.")
        try:
            elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
        except boto.exception.NoAuthHandlerFound, e:
            module.fail_json(msg=str(e))

        wait_timeout = time.time() + wait_timeout
        healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)

        while healthy_instances < as_group.min_size and wait_timeout > time.time():
            healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
            time.sleep(10)
        if wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))

def create_autoscaling_group(connection, module):
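    # Create the group if it does not exist yet; otherwise compare the existing
    # group's attributes, tags and load balancers against the module parameters
    # and update whatever differs. Returns (changed, asg_properties).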
    group_name = module.params.get('name')
    load_balancers = module.params['load_balancers']
    availability_zones = module.params['availability_zones']
    launch_config_name = module.params.get('launch_config_name')
    min_size = module.params['min_size']
    max_size = module.params['max_size']
    desired_capacity = module.params.get('desired_capacity')
    vpc_zone_identifier = module.params.get('vpc_zone_identifier')
    set_tags = module.params.get('tags')
    health_check_period = module.params.get('health_check_period')
    health_check_type = module.params.get('health_check_type')
    default_cooldown = module.params.get('default_cooldown')
    wait_for_instances = module.params.get('wait_for_instances')
    as_groups = connection.get_all_groups(names=[group_name])
    wait_timeout = module.params.get('wait_timeout')
    termination_policies = module.params.get('termination_policies')

    if not vpc_zone_identifier and not availability_zones:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        try:
            ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    elif vpc_zone_identifier:
        vpc_zone_identifier = ','.join(vpc_zone_identifier)

    asg_tags = []
    for tag in set_tags:
        for k, v in tag.iteritems():
            if k != 'propagate_at_launch':
                asg_tags.append(Tag(key=k,
                                    value=v,
                                    propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
                                    resource_id=group_name))

    if not as_groups:
        if not vpc_zone_identifier and not availability_zones:
            availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
        enforce_required_arguments(module)
        launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
        ag = AutoScalingGroup(
            group_name=group_name,
            load_balancers=load_balancers,
            availability_zones=availability_zones,
            launch_config=launch_configs[0],
            min_size=min_size,
            max_size=max_size,
            desired_capacity=desired_capacity,
            vpc_zone_identifier=vpc_zone_identifier,
            connection=connection,
            tags=asg_tags,
            health_check_period=health_check_period,
            health_check_type=health_check_type,
            default_cooldown=default_cooldown,
            termination_policies=termination_policies)

        try:
            connection.create_auto_scaling_group(ag)
            if wait_for_instances == True:
                wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
                wait_for_elb(connection, module, group_name)
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
            changed = True
            return(changed, asg_properties)
        except BotoServerError, e:
            module.fail_json(msg=str(e))
    else:
        as_group = as_groups[0]
        changed = False
        for attr in ASG_ATTRIBUTES:
            if module.params.get(attr, None) is not None:
                module_attr = module.params.get(attr)
                if attr == 'vpc_zone_identifier':
                    module_attr = ','.join(module_attr)
                group_attr = getattr(as_group, attr)
                # we do this because AWS and the module may return the same list
                # sorted differently
                try:
                    module_attr.sort()
                except:
                    pass
                try:
                    group_attr.sort()
                except:
                    pass
                if group_attr != module_attr:
                    changed = True
                    setattr(as_group, attr, module_attr)

        if len(set_tags) > 0:
            have_tags = {}
            want_tags = {}

            for tag in asg_tags:
                want_tags[tag.key] = [tag.value, tag.propagate_at_launch]

            dead_tags = []
            for tag in as_group.tags:
                have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
                if not tag.key in want_tags:
                    changed = True
                    dead_tags.append(tag)

            if dead_tags != []:
                connection.delete_tags(dead_tags)

            if have_tags != want_tags:
                changed = True
                connection.create_or_update_tags(asg_tags)

        # handle loadbalancers separately because None != []
        load_balancers = module.params.get('load_balancers') or []
        if load_balancers and as_group.load_balancers != load_balancers:
            changed = True
            as_group.load_balancers = module.params.get('load_balancers')

        if changed:
            try:
                as_group.update()
            except BotoServerError, e:
                module.fail_json(msg=str(e))

        if wait_for_instances == True:
            wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
            wait_for_elb(connection, module, group_name)
        try:
            as_group = connection.get_all_groups(names=[group_name])[0]
            asg_properties = get_properties(as_group)
        except BotoServerError, e:
            module.fail_json(msg=str(e))
        return(changed, asg_properties)


def delete_autoscaling_group(connection, module):
    group_name = module.params.get('name')
    groups = connection.get_all_groups(names=[group_name])
    if groups:
        group = groups[0]
        group.max_size = 0
        group.min_size = 0
        group.desired_capacity = 0
        group.update()
        instances = True
        while instances:
            tmp_groups = connection.get_all_groups(names=[group_name])
            if tmp_groups:
                tmp_group = tmp_groups[0]
                if not tmp_group.instances:
                    instances = False
            time.sleep(10)

        group.delete()
        while len(connection.get_all_groups(names=[group_name])):
            time.sleep(5)
        changed = True
        return changed
    else:
        changed = False
        return changed

def get_chunks(l, n):
    for i in xrange(0, len(l), n):
        yield l[i:i+n]


def update_size(group, max_size, min_size, dc):
    log.debug("setting ASG sizes")
    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
    group.max_size = max_size
    group.min_size = min_size
    group.desired_capacity = dc
    group.update()

def replace(connection, module):
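    # Rolling replacement: temporarily grow the group by replace_batch_size, wait
    # for the new instances to become viable, terminate old-launch-config instances
    # batch by batch, then restore the original sizes.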
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    max_size = module.params.get('max_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    lc_check = module.params.get('lc_check')
    replace_instances = module.params.get('replace_instances')

    as_group = connection.get_all_groups(names=[group_name])[0]
    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances

    # check to see if instances are replaceable if checking launch configs
    new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
    num_new_inst_needed = desired_capacity - len(new_instances)

    if lc_check:
        if num_new_inst_needed == 0 and old_instances:
            log.debug("No new instances needed, but old instances are present. Removing old instances")
            terminate_batch(connection, module, old_instances, instances, True)
            as_group = connection.get_all_groups(names=[group_name])[0]
            props = get_properties(as_group)
            changed = True
            return(changed, props)

        # we don't want to spin up extra instances if not necessary
        if num_new_inst_needed < batch_size:
            log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
            batch_size = num_new_inst_needed

    if not old_instances:
        changed = False
        return(changed, props)

    # set temporary settings and wait for them to be reached
    # This should get overridden if the number of instances left is less than the batch size.
    as_group = connection.get_all_groups(names=[group_name])[0]
    update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
    wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
    wait_for_elb(connection, module, group_name)
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    instances = props['instances']
    if replace_instances:
        instances = replace_instances

    log.debug("beginning main loop")
    for i in get_chunks(instances, batch_size):
        # break out of this loop if we have enough new instances
        break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
        wait_for_term_inst(connection, module, term_instances)
        wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
        wait_for_elb(connection, module, group_name)
        as_group = connection.get_all_groups(names=[group_name])[0]
        if break_early:
            log.debug("breaking loop")
            break

    update_size(as_group, max_size, min_size, desired_capacity)
    as_group = connection.get_all_groups(names=[group_name])[0]
    asg_properties = get_properties(as_group)
    log.debug("Rolling update complete.")
    changed = True
    return(changed, asg_properties)


def get_instances_by_lc(props, lc_check, initial_instances):
    new_instances = []
    old_instances = []
    # old instances are those that have the old launch config
    if lc_check:
        for i in props['instances']:
            if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
                new_instances.append(i)
            else:
                old_instances.append(i)
    else:
        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
        for i in props['instances']:
            if i not in initial_instances:
                new_instances.append(i)
            else:
                old_instances.append(i)

    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))

    return new_instances, old_instances

def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
    instances_to_terminate = []
    instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])

    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    if lc_check:
        for i in instances:
            if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
                instances_to_terminate.append(i)
    else:
        for i in instances:
            if i in initial_instances:
                instances_to_terminate.append(i)
    return instances_to_terminate

def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
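    # Terminate one batch of replaceable instances, deregistering each from its
    # ELBs first. Returns (break_loop, desired_size, instances_to_terminate) so the
    # caller knows when the rolling replacement can stop.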
    batch_size = module.params.get('replace_batch_size')
    min_size = module.params.get('min_size')
    desired_capacity = module.params.get('desired_capacity')
    group_name = module.params.get('name')
    wait_timeout = int(module.params.get('wait_timeout'))
    lc_check = module.params.get('lc_check')
    decrement_capacity = False
    break_loop = False

    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    desired_size = as_group.min_size

    new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
    num_new_inst_needed = desired_capacity - len(new_instances)

    # check to make sure instances given are actually in the given ASG
    # and they have a non-current launch config
    instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)

    log.debug("new instances needed: {0}".format(num_new_inst_needed))
    log.debug("new instances: {0}".format(new_instances))
    log.debug("old instances: {0}".format(old_instances))
    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))

    if num_new_inst_needed == 0:
        decrement_capacity = True
        if as_group.min_size != min_size:
            as_group.min_size = min_size
            as_group.update()
            log.debug("Updating minimum size back to original of {0}".format(min_size))
        # if there are some leftover old instances, but we are already at capacity
        # with new ones, we don't want to decrement capacity
        if leftovers:
            decrement_capacity = False
        break_loop = True
        instances_to_terminate = old_instances
        desired_size = min_size
        log.debug("No new instances needed")

    if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
        instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
        decrement_capacity = False
        break_loop = False
        log.debug("{0} new instances needed".format(num_new_inst_needed))

    log.debug("decrementing capacity: {0}".format(decrement_capacity))

    for instance_id in instances_to_terminate:
        elb_dreg(connection, module, group_name, instance_id)
        log.debug("terminating instance: {0}".format(instance_id))
        connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)

    # we wait to make sure the machines we marked as Unhealthy are
    # no longer in the list
    return break_loop, desired_size, instances_to_terminate

def wait_for_term_inst(connection, module, term_instances):
    batch_size = module.params.get('replace_batch_size')
    wait_timeout = module.params.get('wait_timeout')
    group_name = module.params.get('name')
    lc_check = module.params.get('lc_check')
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    count = 1
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and count > 0:
        log.debug("waiting for instances to terminate")
        count = 0
        as_group = connection.get_all_groups(names=[group_name])[0]
        props = get_properties(as_group)
        instance_facts = props['instance_facts']
        instances = (i for i in instance_facts if i in term_instances)
        for i in instances:
            lifecycle = instance_facts[i]['lifecycle_state']
            health = instance_facts[i]['health_status']
            log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
            if lifecycle == 'Terminating' or health == 'Unhealthy':
                count += 1
        time.sleep(10)

    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())

def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
    # make sure we have the latest stats after that last loop.
    as_group = connection.get_all_groups(names=[group_name])[0]
    props = get_properties(as_group)
    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
    # now we make sure that we have enough instances in a viable state
    wait_timeout = time.time() + wait_timeout
    while wait_timeout > time.time() and desired_size > props[prop]:
        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
        time.sleep(10)
        as_group = connection.get_all_groups(names=[group_name])[0]
        props = get_properties(as_group)
    if wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
    log.debug("Reached {0}: {1}".format(prop, desired_size))
    return props

def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            load_balancers=dict(type='list'),
            availability_zones=dict(type='list'),
            launch_config_name=dict(type='str'),
            min_size=dict(type='int'),
            max_size=dict(type='int'),
            desired_capacity=dict(type='int'),
            vpc_zone_identifier=dict(type='list'),
            replace_batch_size=dict(type='int', default=1),
            replace_all_instances=dict(type='bool', default=False),
            replace_instances=dict(type='list', default=[]),
            lc_check=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=300),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='list', default=[]),
            health_check_period=dict(type='int', default=300),
            health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
            default_cooldown=dict(type='int', default=300),
            wait_for_instances=dict(type='bool', default=True),
            termination_policies=dict(type='list', default='Default')
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['replace_all_instances', 'replace_instances']]
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    replace_instances = module.params.get('replace_instances')
    replace_all_instances = module.params.get('replace_all_instances')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
        if not connection:
            module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))

    changed = create_changed = replace_changed = False

    if state == 'present':
        create_changed, asg_properties = create_autoscaling_group(connection, module)
    elif state == 'absent':
        changed = delete_autoscaling_group(connection, module)
        module.exit_json(changed=changed)
    if replace_all_instances or replace_instances:
        replace_changed, asg_properties = replace(connection, module)
    if create_changed or replace_changed:
        changed = True
    module.exit_json(changed=changed, **asg_properties)


main()