Split ec2_elb_* modules in service of rename/interface changes (#30716)
* Split ec2_elb_* modules in service of rename/interface changes (#30532)
* Undeprecate ec2_elb_*
* Make ec2_elb* full-fledged modules rather than aliases
* Split tests for ec2_elb_lb and elb_classic_lb
* Change names in documentation of old and new elb modules
* Add tests for ec2_elb_lb
* Update CHANGELOG with new status of ec2_elb_* vs. elb_classic_*
parent bc0b069cb7
commit fd42243936
17 changed files with 2505 additions and 71 deletions
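For readers following the rename, a minimal illustrative task using the new module name (the values below are placeholders, not taken from this commit) looks like:

    - name: Ensure a classic load balancer exists (illustrative only)
      elb_classic_lb:
        name: example-elb          # placeholder name
        state: present
        zones:
          - us-east-1a
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      delegate_to: localhost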
@@ -226,10 +226,10 @@ Ansible Changes By Release
 ### Module Notes
 - By mistake, an early version of elb_classic_lb, elb_instance, and elb_classic_lb_facts modules
-  were released and marked as stableinterface. These will be marked as preview in 2.4.1 and their
+  were released and marked as stableinterface. These are now marked as preview in 2.4.1 and their
   parameters and return values may change in 2.5.0. Part of this mistake included deprecating the
   ec2_elb_lb, ec2_lb, and ec2_elb_facts modules prematurely. These modules won't be deprecated
-  until the replacements above have a stableinterface and the erroneous deprecation will be fixed
+  until the replacements above have a stableinterface and the erroneous deprecation has been fixed
   in 2.4.1.
 - The docker_container module has gained a new option, `working_dir` which allows
   specifying the working directory for the command being run in the image.
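As a rough sketch of the new docker_container option mentioned in the note above (the container name, image, and command are placeholders, not part of this changelog):

    - name: Run a command from a specific working directory (illustrative only)
      docker_container:
        name: working-dir-demo      # placeholder container name
        image: alpine:3.6           # placeholder image
        command: ls
        working_dir: /tmp           # the new option described above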
@@ -1 +0,0 @@
-elb_instance.py

@@ -1 +0,0 @@
-elb_classic_lb_facts.py

@@ -1 +0,0 @@
-elb_classic_lb.py
lib/ansible/modules/cloud/amazon/ec2_elb.py (new file, 377 lines)
@@ -0,0 +1,377 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}


DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
  - This module de-registers or registers an AWS EC2 instance from the ELBs
    that it belongs to.
  - Returns fact "ec2_elbs" which is a list of elbs attached to the instance
    if state=absent is passed as an argument.
  - Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: "John Jarvis (@jarv)"
options:
  state:
    description:
      - register or deregister the instance
    required: true
    choices: ['present', 'absent']
  instance_id:
    description:
      - EC2 Instance ID
    required: true
  ec2_elbs:
    description:
      - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
    required: false
    default: None
  enable_availability_zone:
    description:
      - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
        been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
    required: false
    default: yes
    choices: [ "yes", "no" ]
  wait:
    description:
      - Wait for instance registration or deregistration to complete successfully before returning.
    required: false
    default: yes
    choices: [ "yes", "no" ]
  validate_certs:
    description:
      - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  wait_timeout:
    description:
      - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
        If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
    required: false
    default: 0
    version_added: "1.6"
extends_documentation_fragment:
    - aws
    - ec2
"""

EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
  - name: Gathering ec2 facts
    action: ec2_facts
  - name: Instance De-register
    local_action:
      module: ec2_elb
      instance_id: "{{ ansible_ec2_instance_id }}"
      state: absent
roles:
  - myrole
post_tasks:
  - name: Instance Register
    local_action:
      module: ec2_elb
      instance_id: "{{ ansible_ec2_instance_id }}"
      ec2_elbs: "{{ item }}"
      state: present
    with_items: "{{ ec2_elbs }}"
"""

import time

try:
    import boto
    import boto.ec2
    import boto.ec2.autoscale
    import boto.ec2.elb
    from boto.regioninfo import RegionInfo
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
                                      get_aws_connection_info)


class ElbManager:
    """Handles EC2 instance ELB registration and de-registration"""

    def __init__(self, module, instance_id=None, ec2_elbs=None,
                 region=None, **aws_connect_params):
        self.module = module
        self.instance_id = instance_id
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.lbs = self._get_instance_lbs(ec2_elbs)
        self.changed = False

    def deregister(self, wait, timeout):
        """De-register the instance from all ELBs and wait for the ELB
        to report it out-of-service"""

        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)
            if initial_state is None:
                # Instance isn't registered with this load
                # balancer. Ignore it and try the next one.
                continue

            lb.deregister_instances([self.instance_id])

            # The ELB is changing state in some way. Either an instance that's
            # InService is moving to OutOfService, or an instance that's
            # already OutOfService is being deregistered.
            self.changed = True

            if wait:
                self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)

    def register(self, wait, enable_availability_zone, timeout):
        """Register the instance for all ELBs and wait for the ELB
        to report the instance in-service"""
        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)

            if enable_availability_zone:
                self._enable_availailability_zone(lb)

            lb.register_instances([self.instance_id])

            if wait:
                self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
            else:
                # We cannot assume no change was made if we don't wait
                # to find out
                self.changed = True

    def exists(self, lbtest):
        """ Verify that the named ELB actually exists """

        found = False
        for lb in self.lbs:
            if lb.name == lbtest:
                found=True
                break
        return found

    def _enable_availailability_zone(self, lb):
        """Enable the current instance's availability zone in the provided lb.
        Returns True if the zone was enabled or False if no change was made.
        lb: load balancer"""
        instance = self._get_instance()
        if instance.placement in lb.availability_zones:
            return False

        lb.enable_zones(zones=instance.placement)

        # If successful, the new zone will have been added to
        # lb.availability_zones
        return instance.placement in lb.availability_zones

    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
        """Wait for an ELB to change state
        lb: load balancer
        awaited_state : state to poll for (string)"""

        wait_timeout = time.time() + timeout
        while True:
            instance_state = self._get_instance_health(lb)

            if not instance_state:
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: Invalid Instance")
                self.module.fail_json(msg=msg % (self.instance_id, lb))

            if instance_state.state == awaited_state:
                # Check the current state against the initial state, and only set
                # changed if they are different.
                if (initial_state is None) or (instance_state.state != initial_state.state):
                    self.changed = True
                break
            elif self._is_instance_state_pending(instance_state):
                # If it's pending, we'll skip further checks and continue waiting
                pass
            elif (awaited_state == 'InService'
                  and instance_state.reason_code == "Instance"
                  and time.time() >= wait_timeout):
                # If the reason_code for the instance being out of service is
                # "Instance" this indicates a failure state, e.g. the instance
                # has failed a health check or the ELB does not have the
                # instance's availability zone enabled. The exact reason why is
                # described in InstantState.description.
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: %s")
                self.module.fail_json(msg=msg % (self.instance_id,
                                                 lb,
                                                 instance_state.description))
            time.sleep(1)

    def _is_instance_state_pending(self, instance_state):
        """
        Determines whether the instance_state is "pending", meaning there is
        an operation under way to bring it in service.
        """
        # This is messy, because AWS provides no way to distinguish between
        # an instance that is is OutOfService because it's pending vs. OutOfService
        # because it's failing health checks. So we're forced to analyze the
        # description, which is likely to be brittle.
        return (instance_state and 'pending' in instance_state.description)

    def _get_instance_health(self, lb):
        """
        Check instance health, should return status object or None under
        certain error conditions.
        """
        try:
            status = lb.get_instance_health([self.instance_id])[0]
        except boto.exception.BotoServerError as e:
            if e.error_code == 'InvalidInstance':
                return None
            else:
                raise
        return status

    def _get_instance_lbs(self, ec2_elbs=None):
        """Returns a list of ELBs attached to self.instance_id
        ec2_elbs: an optional list of elb names that will be used
                  for elb lookup instead of returning what elbs
                  are attached to self.instance_id"""

        if not ec2_elbs:
            ec2_elbs = self._get_auto_scaling_group_lbs()

        try:
            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        elbs = []
        marker = None
        while True:
            try:
                newelbs = elb.get_all_load_balancers(marker=marker)
                marker = newelbs.next_marker
                elbs.extend(newelbs)
                if not marker:
                    break
            except TypeError:
                # Older version of boto do not allow for params
                elbs = elb.get_all_load_balancers()
                break

        if ec2_elbs:
            lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
        else:
            lbs = []
            for lb in elbs:
                for info in lb.instances:
                    if self.instance_id == info.id:
                        lbs.append(lb)
        return lbs

    def _get_auto_scaling_group_lbs(self):
        """Returns a list of ELBs associated with self.instance_id
        indirectly through its auto scaling group membership"""

        try:
            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
        if len(asg_instances) > 1:
            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")

        if not asg_instances:
            asg_elbs = []
        else:
            asg_name = asg_instances[0].group_name

            asgs = asg.get_all_groups([asg_name])
            if len(asg_instances) != 1:
                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")

            asg_elbs = asgs[0].load_balancers

        return asg_elbs

    def _get_instance(self):
        """Returns a boto.ec2.InstanceObject for self.instance_id"""
        try:
            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))
        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True},
        instance_id={'required': True},
        ec2_elbs={'default': None, 'required': False, 'type':'list'},
        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
        wait={'required': False, 'default': True, 'type': 'bool'},
        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']

    if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
        module.fail_json(msg="ELBs are required for registration")

    instance_id = module.params['instance_id']
    elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)

    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                msg="ELB %s does not exist" % elb
                module.fail_json(msg=msg)

    if module.params['state'] == 'present':
        elb_man.register(wait, enable_availability_zone, timeout)
    elif module.params['state'] == 'absent':
        elb_man.deregister(wait, timeout)

    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)


if __name__ == '__main__':
    main()
lib/ansible/modules/cloud/amazon/ec2_elb_facts.py (new file, 266 lines)
@@ -0,0 +1,266 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
    - Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
  - "Michael Schultz (github.com/mjschultz)"
  - "Fernando Jose Pando (@nand0p)"
options:
  names:
    description:
      - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
    required: false
    default: null
    aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters

# Gather facts about all ELBs
- action:
    module: ec2_elb_facts
  register: elb_facts

- action:
    module: debug
    msg: "{{ item.dns_name }}"
  with_items: "{{ elb_facts.elbs }}"

# Gather facts about a particular ELB
- action:
    module: ec2_elb_facts
    names: frontend-prod-elb
  register: elb_facts

- action:
    module: debug
    msg: "{{ elb_facts.elbs.0.dns_name }}"

# Gather facts about a set of ELBs
- action:
    module: ec2_elb_facts
    names:
    - frontend-prod-elb
    - backend-prod-elb
  register: elb_facts

- action:
    module: debug
    msg: "{{ item.dns_name }}"
  with_items: "{{ elb_facts.elbs }}"

'''

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
    AWSRetry,
    connect_to_aws,
    ec2_argument_spec,
    get_aws_connection_info,
)

try:
    import boto.ec2.elb
    from boto.ec2.tag import Tag
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


class ElbInformation(object):
    """Handles ELB information."""

    def __init__(self,
                 module,
                 names,
                 region,
                 **aws_connect_params):

        self.module = module
        self.names = names
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_elb_connection()

    def _get_tags(self, elbname):
        params = {'LoadBalancerNames.member.1': elbname}
        elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
        return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))

    @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
    def _get_elb_connection(self):
        return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)

    def _get_elb_listeners(self, listeners):
        listener_list = []

        for listener in listeners:
            listener_dict = {
                'load_balancer_port': listener[0],
                'instance_port': listener[1],
                'protocol': listener[2],
            }

            try:
                ssl_certificate_id = listener[4]
            except IndexError:
                pass
            else:
                if ssl_certificate_id:
                    listener_dict['ssl_certificate_id'] = ssl_certificate_id

            listener_list.append(listener_dict)

        return listener_list

    def _get_health_check(self, health_check):
        protocol, port_path = health_check.target.split(':')
        try:
            port, path = port_path.split('/', 1)
            path = '/{0}'.format(path)
        except ValueError:
            port = port_path
            path = None

        health_check_dict = {
            'ping_protocol': protocol.lower(),
            'ping_port': int(port),
            'response_timeout': health_check.timeout,
            'interval': health_check.interval,
            'unhealthy_threshold': health_check.unhealthy_threshold,
            'healthy_threshold': health_check.healthy_threshold,
        }

        if path:
            health_check_dict['ping_path'] = path
        return health_check_dict

    @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
    def _get_elb_info(self, elb):
        elb_info = {
            'name': elb.name,
            'zones': elb.availability_zones,
            'dns_name': elb.dns_name,
            'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
            'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
            'hosted_zone_name': elb.canonical_hosted_zone_name,
            'hosted_zone_id': elb.canonical_hosted_zone_name_id,
            'instances': [instance.id for instance in elb.instances],
            'listeners': self._get_elb_listeners(elb.listeners),
            'scheme': elb.scheme,
            'security_groups': elb.security_groups,
            'health_check': self._get_health_check(elb.health_check),
            'subnets': elb.subnets,
            'instances_inservice': [],
            'instances_inservice_count': 0,
            'instances_outofservice': [],
            'instances_outofservice_count': 0,
            'instances_inservice_percent': 0.0,
            'tags': self._get_tags(elb.name)
        }

        if elb.vpc_id:
            elb_info['vpc_id'] = elb.vpc_id

        if elb.instances:
            instance_health = self.connection.describe_instance_health(elb.name)
            elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
            elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
            elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
            elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
            try:
                elb_info['instances_inservice_percent'] = (
                    float(elb_info['instances_inservice_count']) /
                    float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
                ) * 100.
            except ZeroDivisionError:
                elb_info['instances_inservice_percent'] = 0.
        return elb_info

    def list_elbs(self):
        elb_array, token = [], None
        get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
        while True:
            all_elbs = get_elb_with_backoff(marker=token)
            token = all_elbs.next_marker

            if all_elbs:
                if self.names:
                    for existing_lb in all_elbs:
                        if existing_lb.name in self.names:
                            elb_array.append(existing_lb)
                else:
                    elb_array.extend(all_elbs)
            else:
                break

            if token is None:
                break

        return list(map(self._get_elb_info, elb_array))


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        names={'default': [], 'type': 'list'}
    )
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        if not region:
            module.fail_json(msg="region must be specified")

        names = module.params['names']
        elb_information = ElbInformation(
            module, names, region, **aws_connect_params)

        ec2_facts_result = dict(changed=False,
                                elbs=elb_information.list_elbs())

    except BotoServerError as err:
        module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
                         exception=traceback.format_exc())

    module.exit_json(**ec2_facts_result)


if __name__ == '__main__':
    main()
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py (new file, 1374 lines)
File diff suppressed because it is too large.
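The restored module's full source is not reproduced here. As a rough usage sketch only (mirroring the integration tests added later in this commit, with placeholder values, not the suppressed file itself), the undeprecated module is invoked like:

    - name: Create a classic ELB with the undeprecated module name (illustrative only)
      ec2_elb_lb:
        name: example-elb           # placeholder name
        region: us-east-1           # placeholder region
        state: present
        zones:
          - us-east-1c
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80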
@@ -7,7 +7,7 @@ __metaclass__ = type

 ANSIBLE_METADATA = {'metadata_version': '1.1',
-                    'status': ['stableinterface'],
+                    'status': ['preview'],
                     'supported_by': 'certified'}


@@ -170,8 +170,7 @@ EXAMPLES = """
 # Basic provisioning example (non-VPC)

-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: present
     zones:
@@ -188,11 +187,11 @@ EXAMPLES = """
         instance_port: 80
         # ssl certificate required for https or ssl
         ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
+  delegate_to: localhost

 # Internal ELB example

-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-vpc"
     scheme: internal
     state: present
@@ -206,10 +205,10 @@ EXAMPLES = """
       - protocol: http # options are http, https, ssl, tcp
         load_balancer_port: 80
         instance_port: 80
+  delegate_to: localhost

 # Configure a health check and the access logs
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: present
     zones:
@@ -230,33 +229,33 @@ EXAMPLES = """
       interval: 5 # minutes (defaults to 60)
       s3_location: "my-bucket" # This value is required if access_logs is set
       s3_prefix: "logs"
+  delegate_to: localhost

 # Ensure ELB is gone
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: absent
+  delegate_to: localhost

 # Ensure ELB is gone and wait for check (for default timeout)
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: absent
     wait: yes
+  delegate_to: localhost

 # Ensure ELB is gone and wait for check with timeout value
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: absent
     wait: yes
     wait_timeout: 600
+  delegate_to: localhost

 # Normally, this module will purge any listeners that exist on the ELB
 # but aren't specified in the listeners parameter. If purge_listeners is
 # false it leaves them alone
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: present
     zones:
@@ -267,12 +266,12 @@ EXAMPLES = """
         load_balancer_port: 80
         instance_port: 80
     purge_listeners: no
+  delegate_to: localhost

 # Normally, this module will leave availability zones that are enabled
 # on the ELB alone. If purge_zones is true, then any extraneous zones
 # will be removed
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "test-please-delete"
     state: present
     zones:
@@ -283,10 +282,10 @@ EXAMPLES = """
         load_balancer_port: 80
         instance_port: 80
     purge_zones: yes
+  delegate_to: localhost

 # Creates a ELB and assigns a list of subnets to it.
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     state: present
     name: 'New ELB'
     security_group_ids: 'sg-123456, sg-67890'
@@ -297,11 +296,11 @@ EXAMPLES = """
       - protocol: http
         load_balancer_port: 80
         instance_port: 80
+  delegate_to: localhost

 # Create an ELB with connection draining, increased idle timeout and cross availability
 # zone load balancing
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "New ELB"
     state: present
     connection_draining_timeout: 60
@@ -315,10 +314,10 @@ EXAMPLES = """
       - protocol: http
         load_balancer_port: 80
         instance_port: 80
+  delegate_to: localhost

 # Create an ELB with load balancer stickiness enabled
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "New ELB"
     state: present
     region: us-east-1
@@ -333,10 +332,10 @@ EXAMPLES = """
       type: loadbalancer
       enabled: yes
       expiration: 300
+  delegate_to: localhost

 # Create an ELB with application stickiness enabled
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "New ELB"
     state: present
     region: us-east-1
@@ -351,10 +350,10 @@ EXAMPLES = """
       type: application
       enabled: yes
       cookie: SESSIONID
+  delegate_to: localhost

 # Create an ELB and add tags
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "New ELB"
     state: present
     region: us-east-1
@@ -369,10 +368,10 @@ EXAMPLES = """
       Name: "New ELB"
       stack: "production"
       client: "Bob"
+  delegate_to: localhost

 # Delete all tags from an ELB
-- local_action:
-    module: ec2_elb_lb
+- elb_classic_lb:
     name: "New ELB"
     state: present
     region: us-east-1
@@ -384,6 +383,7 @@ EXAMPLES = """
         load_balancer_port: 80
         instance_port: 80
     tags: {}
+  delegate_to: localhost
 """

 import random
@@ -45,35 +45,29 @@ EXAMPLES = '''
 # Output format tries to match ec2_elb_lb module input parameters

 # Gather facts about all ELBs
-- action:
-    module: ec2_elb_facts
+- elb_classic_lb_facts:
   register: elb_facts

-- action:
-    module: debug
+- debug:
     msg: "{{ item.dns_name }}"
   with_items: "{{ elb_facts.elbs }}"

 # Gather facts about a particular ELB
-- action:
-    module: ec2_elb_facts
+- elb_classic_lb_facts:
     names: frontend-prod-elb
   register: elb_facts

-- action:
-    module: debug
+- debug:
     msg: "{{ elb_facts.elbs.0.dns_name }}"

 # Gather facts about a set of ELBs
-- action:
-    module: ec2_elb_facts
+- elb_classic_lb_facts:
     names:
     - frontend-prod-elb
     - backend-prod-elb
   register: elb_facts

-- action:
-    module: debug
+- debug:
     msg: "{{ item.dns_name }}"
   with_items: "{{ elb_facts.elbs }}"


@@ -208,7 +202,6 @@ class ElbInformation(object):
             elb_info['instances_inservice_percent'] = 0.
         return elb_info
-

     def list_elbs(self):
         elb_array, token = [], None
         get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
@@ -7,7 +7,7 @@ __metaclass__ = type

 ANSIBLE_METADATA = {'metadata_version': '1.1',
-                    'status': ['stableinterface'],
+                    'status': ['preview'],
                     'supported_by': 'certified'}


@@ -77,19 +77,19 @@ pre_tasks:
   - name: Gathering ec2 facts
     action: ec2_facts
   - name: Instance De-register
-    local_action:
-      module: ec2_elb
+    elb_instance:
       instance_id: "{{ ansible_ec2_instance_id }}"
       state: absent
+    delegate_to: localhost
 roles:
   - myrole
 post_tasks:
   - name: Instance Register
-    local_action:
-      module: ec2_elb
+    elb_instance:
       instance_id: "{{ ansible_ec2_instance_id }}"
       ec2_elbs: "{{ item }}"
       state: present
+    delegate_to: localhost
     with_items: "{{ ec2_elbs }}"
 """

test/integration/targets/ec2_elb_lb/aliases (new file, 2 lines)
@@ -0,0 +1,2 @@
cloud/aws
posix/ci/cloud/group1/aws
test/integration/targets/ec2_elb_lb/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
# defaults file for test_ec2_eip
tag_prefix: '{{resource_prefix}}'
test/integration/targets/ec2_elb_lb/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
dependencies:
  - prepare_tests
  - setup_ec2
test/integration/targets/ec2_elb_lb/tasks/main.yml (new file, 419 lines)
@@ -0,0 +1,419 @@
---
# __Test Info__
# Create a self signed cert and upload it to AWS
# http://www.akadia.com/services/ssh_test_certificate.html
# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html

# __Test Outline__
#
# __ec2_elb_lb__
# create test elb with listeners and certificate
# change AZ's
# change listeners
# remove listeners
# remove elb

# __ec2-common__
# test environment variable EC2_REGION
# test with no parameters
# test with only instance_id
# test invalid region parameter
# test valid region parameter
# test invalid ec2_url parameter
# test valid ec2_url parameter
# test credentials from environment
# test credential parameters

- block:

    # ============================================================
    # create test elb with listeners, certificate, and health check

    - name: Create ELB
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
        state: present
        zones:
          - us-east-1c
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
          - protocol: http
            load_balancer_port: 8080
            instance_port: 8080
        health_check:
          ping_protocol: http
          ping_port: 80
          ping_path: "/index.html"
          response_timeout: 5
          interval: 30
          unhealthy_threshold: 2
          healthy_threshold: 10
      register: info

    - assert:
        that:
          - 'info.changed'
          - 'info.elb.status == "created"'
          - '"us-east-1c" in info.elb.zones'
          - '"us-east-1d" in info.elb.zones'
          - 'info.elb.health_check.healthy_threshold == 10'
          - 'info.elb.health_check.interval == 30'
          - 'info.elb.health_check.target == "HTTP:80/index.html"'
          - 'info.elb.health_check.timeout == 5'
          - 'info.elb.health_check.unhealthy_threshold == 2'
          - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
          - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'

    # ============================================================

    # check ports, would be cool, but we are at the mercy of AWS
    # to start things in a timely manner

    #- name: check to make sure 80 is listening
    #  wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
    #  register: result

    #- name: assert can connect to port#
    #  assert: 'result.state == "started"'

    #- name: check to make sure 443 is listening
    #  wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
    #  register: result

    #- name: assert can connect to port#
    #  assert: 'result.state == "started"'

    # ============================================================

    # Change AZ's

    - name: Change AZ's
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
        state: present
        zones:
          - us-east-1b
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
        purge_zones: yes
        health_check:
          ping_protocol: http
          ping_port: 80
          ping_path: "/index.html"
          response_timeout: 5
          interval: 30
          unhealthy_threshold: 2
          healthy_threshold: 10
      register: info

    - assert:
        that:
          - 'info.elb.status == "ok"'
          - 'info.changed'
          - 'info.elb.zones[0] == "us-east-1b"'

    # ============================================================

    # Update AZ's

    - name: Update AZ's
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
        state: present
        zones:
          - us-east-1b
          - us-east-1c
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
        purge_zones: yes
      register: info

    - assert:
        that:
          - 'info.changed'
          - 'info.elb.status == "ok"'
          - '"us-east-1b" in info.elb.zones'
          - '"us-east-1c" in info.elb.zones'
          - '"us-east-1d" in info.elb.zones'

    # ============================================================

    # Purge Listeners

    - name: Purge Listeners
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
        state: present
        zones:
          - us-east-1b
          - us-east-1c
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 81
        purge_listeners: yes
      register: info

    - assert:
        that:
          - 'info.elb.status == "ok"'
          - 'info.changed'
          - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
          - 'info.elb.listeners|length == 1'

    # ============================================================

    # add Listeners

    - name: Add Listeners
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
        state: present
        zones:
          - us-east-1b
          - us-east-1c
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 8081
            instance_port: 8081
        purge_listeners: no
      register: info

    - assert:
        that:
          - 'info.elb.status == "ok"'
          - 'info.changed'
          - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
          - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
          - 'info.elb.listeners|length == 2'

    # ============================================================

    - name: test with no parameters
      ec2_elb_lb:
      register: result
      ignore_errors: true

    - name: assert failure when called with no parameters
      assert:
        that:
          - 'result.failed'
          - 'result.msg.startswith("missing required arguments: ")'

    # ============================================================
    - name: test with only name
      ec2_elb_lb:
        name="{{ tag_prefix }}"
      register: result
      ignore_errors: true

    - name: assert failure when called with only name
      assert:
        that:
          - 'result.failed'
          - 'result.msg == "missing required arguments: state"'

    # ============================================================
    - name: test invalid region parameter
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: 'asdf querty 1234'
        state: present
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
        zones:
          - us-east-1c
          - us-east-1d
      register: result
      ignore_errors: true

    - name: assert invalid region parameter
      assert:
        that:
          - 'result.failed'
          - 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")'

    # ============================================================
    - name: test valid region parameter
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
        zones:
          - us-east-1a
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      register: result
      ignore_errors: true

    - name: assert valid region parameter
      assert:
        that:
          - 'result.failed'
          - 'result.msg.startswith("No handler was ready to authenticate.")'

    # ============================================================

    - name: test invalid ec2_url parameter
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
        zones:
          - us-east-1a
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      environment:
        EC2_URL: bogus.example.com
      register: result
      ignore_errors: true

    - name: assert invalid ec2_url parameter
      assert:
        that:
          - 'result.failed'
          - 'result.msg.startswith("No handler was ready to authenticate.")'

    # ============================================================
    - name: test valid ec2_url parameter
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
        zones:
          - us-east-1a
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      environment:
        EC2_URL: '{{ec2_url}}'
      register: result
      ignore_errors: true

    - name: assert valid ec2_url parameter
      assert:
        that:
          - 'result.failed'
          - 'result.msg.startswith("No handler was ready to authenticate.")'

    # ============================================================
    - name: test credentials from environment
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
        zones:
          - us-east-1a
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      environment:
        EC2_ACCESS_KEY: bogus_access_key
        EC2_SECRET_KEY: bogus_secret_key
      register: result
      ignore_errors: true

    - name: assert credentials from environment
      assert:
        that:
          - 'result.failed'
          - '"InvalidClientTokenId" in result.exception'

    # ============================================================
    - name: test credential parameters
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
        zones:
          - us-east-1a
          - us-east-1d
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
      register: result
      ignore_errors: true

    - name: assert credential parameters
      assert:
        that:
          - 'result.failed'
          - '"No handler was ready to authenticate. 1 handlers were checked." in result.msg'

  always:

    # ============================================================
    - name: remove the test load balancer completely
      ec2_elb_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: absent
        ec2_access_key: "{{ ec2_access_key }}"
        ec2_secret_key: "{{ ec2_secret_key }}"
        security_token: "{{ security_token }}"
      register: result

    - name: assert the load balancer was removed
      assert:
        that:
          - 'result.changed'
          - 'result.elb.name == "{{tag_prefix}}"'
          - 'result.elb.status == "deleted"'
test/integration/targets/ec2_elb_lb/vars/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
# vars file for test_ec2_elb_lb
@@ -6,7 +6,7 @@

 # __Test Outline__
 #
-# __ec2_elb_lb__
+# __elb_classic_lb__
 # create test elb with listeners and certificate
 # change AZ's
 # change listeners
@@ -30,7 +30,7 @@
     # create test elb with listeners, certificate, and health check

     - name: Create ELB
-      ec2_elb_lb:
+      elb_classic_lb:
         name: "{{ tag_prefix }}"
         region: "{{ ec2_region }}"
         ec2_access_key: "{{ ec2_access_key }}"
@@ -95,7 +95,7 @@
     # Change AZ's

     - name: Change AZ's
-      ec2_elb_lb:
+      elb_classic_lb:
         name: "{{ tag_prefix }}"
         region: "{{ ec2_region }}"
         ec2_access_key: "{{ ec2_access_key }}"
@@ -132,7 +132,7 @@
     # Update AZ's

     - name: Update AZ's
-      ec2_elb_lb:
+      elb_classic_lb:
         name: "{{ tag_prefix }}"
         region: "{{ ec2_region }}"
         ec2_access_key: "{{ ec2_access_key }}"
@@ -164,7 +164,7 @@
     # Purge Listeners

     - name: Purge Listeners
-      ec2_elb_lb:
+      elb_classic_lb:
         name: "{{ tag_prefix }}"
         region: "{{ ec2_region }}"
         ec2_access_key: "{{ ec2_access_key }}"
@@ -196,7 +196,7 @@
     # add Listeners

     - name: Add Listeners
-      ec2_elb_lb:
+      elb_classic_lb:
         name: "{{ tag_prefix }}"
         region: "{{ ec2_region }}"
         ec2_access_key: "{{ ec2_access_key }}"
@@ -226,7 +226,7 @@
     # ============================================================

     - name: test with no parameters
-      ec2_elb_lb:
+      elb_classic_lb:
      register: result
      ignore_errors: true

@@ -240,7 +240,7 @@

     # ============================================================
     - name: test with only name
-      ec2_elb_lb:
+      elb_classic_lb:
        name="{{ tag_prefix }}"
      register: result
      ignore_errors: true
@@ -254,7 +254,7 @@

     # ============================================================
     - name: test invalid region parameter
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: 'asdf querty 1234'
        state: present
@@ -277,7 +277,7 @@

     # ============================================================
     - name: test valid region parameter
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
@@ -302,7 +302,7 @@
     # ============================================================

     - name: test invalid ec2_url parameter
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
@@ -327,7 +327,7 @@

     # ============================================================
     - name: test valid ec2_url parameter
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
@@ -352,7 +352,7 @@

     # ============================================================
     - name: test credentials from environment
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
@@ -378,7 +378,7 @@

     # ============================================================
     - name: test credential parameters
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: present
@@ -402,7 +402,7 @@

     # ============================================================
     - name: remove the test load balancer completely
-      ec2_elb_lb:
+      elb_classic_lb:
        name: "{{ tag_prefix }}"
        region: "{{ ec2_region }}"
        state: absent
@@ -4,9 +4,8 @@ lib/ansible/config/data.py
 lib/ansible/config/manager.py
 lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
 lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
-lib/ansible/modules/cloud/amazon/_ec2_elb.py
-lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
-lib/ansible/modules/cloud/amazon/_ec2_elb_lb.py
+lib/ansible/modules/cloud/amazon/ec2_elb.py
+lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
 lib/ansible/modules/cloud/amazon/_ec2_vpc.py
 lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_options.py
 lib/ansible/modules/cloud/openstack/_os_server_actions.py
@@ -46,7 +45,6 @@ lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
 lib/ansible/modules/cloud/amazon/efs.py
 lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
 lib/ansible/modules/cloud/amazon/elb_instance.py
 lib/ansible/modules/cloud/amazon/elb_classic_lb_facts.py
 lib/ansible/modules/cloud/amazon/elb_classic_lb.py
 lib/ansible/modules/cloud/amazon/execute_lambda.py
 lib/ansible/modules/cloud/amazon/iam.py