#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = """
|
|
|
|
---
|
|
|
|
module: elasticache
|
2013-12-26 00:07:49 +01:00
|
|
|
short_description: Manage cache clusters in Amazon Elasticache.
|
2013-11-13 16:15:51 +01:00
|
|
|
description:
|
|
|
|
- Manage cache clusters in Amazon Elasticache.
|
2013-10-01 00:58:25 +02:00
|
|
|
- Returns information about the specified cache cluster.
|
|
|
|
version_added: "1.4"
|
|
|
|
requirements: [ "boto" ]
|
|
|
|
author: Jim Dalton
|
|
|
|
options:
|
|
|
|
state:
|
|
|
|
description:
|
|
|
|
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage.
|
|
|
|
choices: ['present', 'absent', 'rebooted']
|
|
|
|
required: true
|
|
|
|
name:
|
|
|
|
description:
|
|
|
|
- The cache cluster identifier
|
|
|
|
required: true
|
|
|
|
engine:
|
|
|
|
description:
|
|
|
|
- Name of the cache engine to be used (memcached or redis)
|
|
|
|
required: false
|
|
|
|
default: memcached
|
|
|
|
cache_engine_version:
|
|
|
|
description:
|
|
|
|
- The version number of the cache engine
|
|
|
|
required: false
|
|
|
|
default: 1.4.14
|
|
|
|
node_type:
|
|
|
|
description:
|
|
|
|
- The compute and memory capacity of the nodes in the cache cluster
|
|
|
|
required: false
|
|
|
|
default: cache.m1.small
|
|
|
|
num_nodes:
|
|
|
|
description:
|
|
|
|
- The initial number of cache nodes that the cache cluster will have
|
|
|
|
required: false
|
|
|
|
cache_port:
|
|
|
|
description:
|
|
|
|
- The port number on which each of the cache nodes will accept connections
|
|
|
|
required: false
|
|
|
|
default: 11211
|
2014-10-06 16:49:22 +02:00
|
|
|
  cache_subnet_group:
    description:
      - The subnet group name to associate with. Only use if inside a VPC; required when inside a VPC.
    required: conditional
    default: None
    version_added: "1.7"
  security_group_ids:
    description:
      - A list of VPC security group IDs to associate with this cache cluster. Only use if inside a VPC.
    required: false
    default: []
    version_added: "1.6"
  cache_security_groups:
    description:
      - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a VPC.
    required: false
    default: ['default']
  zone:
    description:
      - The EC2 Availability Zone in which the cache cluster will be created
    required: false
    default: None
  wait:
    description:
      - Wait for cache cluster result before returning
    required: false
    default: yes
    choices: [ "yes", "no" ]
  hard_modify:
    description:
      - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
    required: false
    default: no
    choices: [ "yes", "no" ]
  region:
    description:
      - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
    required: false
    default: null
    aliases: ['aws_region', 'ec2_region']
extends_documentation_fragment: aws
"""

EXAMPLES = """
|
|
|
|
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
|
|
|
|
# It is assumed that their matching environment variables are set.
|
|
|
|
|
|
|
|
# Basic example
|
2014-12-01 21:14:57 +01:00
|
|
|
- elasticache:
|
2013-10-01 00:58:25 +02:00
|
|
|
name: "test-please-delete"
|
|
|
|
state: present
|
|
|
|
engine: memcached
|
|
|
|
cache_engine_version: 1.4.14
|
|
|
|
node_type: cache.m1.small
|
|
|
|
num_nodes: 1
|
|
|
|
cache_port: 11211
|
|
|
|
cache_security_groups:
|
|
|
|
- default
|
|
|
|
zone: us-east-1d
|
|
|
|
|
|
|
|
|
|
|
|
# Ensure cache cluster is gone
|
2014-12-01 21:14:57 +01:00
|
|
|
- elasticache:
|
2013-10-01 00:58:25 +02:00
|
|
|
name: "test-please-delete"
|
|
|
|
state: absent
|
|
|
|
|
|
|
|
# Reboot cache cluster
|
2014-12-01 21:14:57 +01:00
|
|
|
- elasticache:
|
2013-10-01 00:58:25 +02:00
|
|
|
name: "test-please-delete"
|
|
|
|
state: rebooted
|
|
|
|
|
|
|
|
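# Hypothetical sketch: a redis cluster inside a VPC. The subnet group
# "test-subnet-group", the security group id "sg-xxxxxxxx" and the engine
# version shown here are illustrative placeholders; adjust them to match
# your environment.
- elasticache:
    name: "test-please-delete"
    state: present
    engine: redis
    cache_engine_version: 2.8.6
    node_type: cache.m1.small
    num_nodes: 1
    cache_port: 6379
    cache_subnet_group: test-subnet-group
    security_group_ids:
      - sg-xxxxxxxx
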
"""

import sys
import os
import time

try:
    import boto
    from boto.elasticache.layer1 import ElastiCacheConnection
    from boto.regioninfo import RegionInfo
except ImportError:
    print "failed=True msg='boto required for this module'"
    sys.exit(1)


class ElastiCacheManager(object):
    """Handles elasticache creation and destruction"""

    EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']

    def __init__(self, module, name, engine, cache_engine_version, node_type,
                 num_nodes, cache_port, cache_subnet_group,
                 cache_security_groups, security_group_ids, zone, wait,
                 hard_modify, region, **aws_connect_kwargs):
        self.module = module
        self.name = name
        self.engine = engine
        self.cache_engine_version = cache_engine_version
        self.node_type = node_type
        self.num_nodes = num_nodes
        self.cache_port = cache_port
        self.cache_subnet_group = cache_subnet_group
        self.cache_security_groups = cache_security_groups
        self.security_group_ids = security_group_ids
        self.zone = zone
        self.wait = wait
        self.hard_modify = hard_modify

        self.region = region
        self.aws_connect_kwargs = aws_connect_kwargs

        self.changed = False
        self.data = None
        self.status = 'gone'
        self.conn = self._get_elasticache_connection()
        self._refresh_data()

    def ensure_present(self):
        """Ensure cache cluster exists or create it if not"""
        if self.exists():
            self.sync()
        else:
            self.create()

    def ensure_absent(self):
        """Ensure cache cluster is gone or delete it if not"""
        self.delete()

    def ensure_rebooted(self):
        """Ensure cache cluster is rebooted"""
        self.reboot()

    def exists(self):
        """Check if cache cluster exists"""
        return self.status in self.EXIST_STATUSES

    def create(self):
        """Create an ElastiCache cluster"""
        if self.status == 'available':
            return
        if self.status in ['creating', 'rebooting', 'modifying']:
            if self.wait:
                self._wait_for_status('available')
            return
        if self.status == 'deleting':
            if self.wait:
                self._wait_for_status('gone')
            else:
                msg = "'%s' is currently deleting. Cannot create."
                self.module.fail_json(msg=msg % self.name)

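        # The cluster is absent at this point, so create it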
        try:
            response = self.conn.create_cache_cluster(cache_cluster_id=self.name,
                                                      num_cache_nodes=self.num_nodes,
                                                      cache_node_type=self.node_type,
                                                      engine=self.engine,
                                                      engine_version=self.cache_engine_version,
                                                      cache_security_group_names=self.cache_security_groups,
                                                      security_group_ids=self.security_group_ids,
                                                      cache_subnet_group_name=self.cache_subnet_group,
                                                      preferred_availability_zone=self.zone,
                                                      port=self.cache_port)
        except boto.exception.BotoServerError, e:
            self.module.fail_json(msg=e.message)
        cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster']
        self._refresh_data(cache_cluster_data)

        self.changed = True
        if self.wait:
            self._wait_for_status('available')
        return True

    def delete(self):
        """Destroy an ElastiCache cluster"""
        if self.status == 'gone':
            return
        if self.status == 'deleting':
            if self.wait:
                self._wait_for_status('gone')
            return
        if self.status in ['creating', 'rebooting', 'modifying']:
            if self.wait:
                self._wait_for_status('available')
            else:
                msg = "'%s' is currently %s. Cannot delete."
                self.module.fail_json(msg=msg % (self.name, self.status))

        try:
            response = self.conn.delete_cache_cluster(cache_cluster_id=self.name)
        except boto.exception.BotoServerError, e:
            self.module.fail_json(msg=e.message)
        cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster']
        self._refresh_data(cache_cluster_data)

        self.changed = True
        if self.wait:
            self._wait_for_status('gone')

    def sync(self):
        """Sync settings to cluster if required"""
        if not self.exists():
            msg = "'%s' is %s. Cannot sync."
            self.module.fail_json(msg=msg % (self.name, self.status))

        if self.status in ['creating', 'rebooting', 'modifying']:
            if self.wait:
                self._wait_for_status('available')
            else:
                # Cluster can only be synced if available. If we can't wait
                # for this, then just be done.
                return

        if self._requires_destroy_and_create():
            if not self.hard_modify:
                msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
                self.module.fail_json(msg=msg % self.name)
            if not self.wait:
                msg = "'%s' requires destructive modification. 'wait' must be set to true."
                self.module.fail_json(msg=msg % self.name)
            self.delete()
            self.create()
            return

        if self._requires_modification():
            self.modify()

    def modify(self):
        """Modify the cache cluster. Note it's only possible to modify a few select options."""
        nodes_to_remove = self._get_nodes_to_remove()
        try:
            response = self.conn.modify_cache_cluster(cache_cluster_id=self.name,
                                                      num_cache_nodes=self.num_nodes,
                                                      cache_node_ids_to_remove=nodes_to_remove,
                                                      cache_security_group_names=self.cache_security_groups,
                                                      security_group_ids=self.security_group_ids,
                                                      apply_immediately=True,
                                                      engine_version=self.cache_engine_version)
        except boto.exception.BotoServerError, e:
            self.module.fail_json(msg=e.message)

        cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster']
        self._refresh_data(cache_cluster_data)

        self.changed = True
        if self.wait:
            self._wait_for_status('available')

    def reboot(self):
        """Reboot the cache cluster"""
        if not self.exists():
            msg = "'%s' is %s. Cannot reboot."
            self.module.fail_json(msg=msg % (self.name, self.status))
        if self.status == 'rebooting':
            return
        if self.status in ['creating', 'modifying']:
            if self.wait:
                self._wait_for_status('available')
            else:
                msg = "'%s' is currently %s. Cannot reboot."
                self.module.fail_json(msg=msg % (self.name, self.status))

        # Collect ALL nodes for reboot
        cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
        try:
            response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name,
                                                      cache_node_ids_to_reboot=cache_node_ids)
        except boto.exception.BotoServerError, e:
            self.module.fail_json(msg=e.message)

        cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster']
        self._refresh_data(cache_cluster_data)

        self.changed = True
        if self.wait:
            self._wait_for_status('available')

    def get_info(self):
        """Return basic info about the cache cluster"""
        info = {
            'name': self.name,
            'status': self.status
        }
        if self.data:
            info['data'] = self.data
        return info

    def _wait_for_status(self, awaited_status):
        """Wait for status to change from present status to awaited_status"""
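        # Each transitional status and the terminal status it resolves to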
        status_map = {
            'creating': 'available',
            'rebooting': 'available',
            'modifying': 'available',
            'deleting': 'gone'
        }
        if self.status == awaited_status:
            # No need to wait, we're already done
            return

        if status_map[self.status] != awaited_status:
            msg = "Invalid awaited status. '%s' cannot transition to '%s'"
            self.module.fail_json(msg=msg % (self.status, awaited_status))

        if awaited_status not in set(status_map.values()):
            msg = "'%s' is not a valid awaited status."
            self.module.fail_json(msg=msg % awaited_status)

        while True:
            time.sleep(1)
            self._refresh_data()
            if self.status == awaited_status:
                break

    def _requires_modification(self):
        """Check if cluster requires (nondestructive) modification"""
        # Check modifiable data attributes
        modifiable_data = {
            'NumCacheNodes': self.num_nodes,
            'EngineVersion': self.cache_engine_version
        }
        for key, value in modifiable_data.iteritems():
            if self.data[key] != value:
                return True

        # Check cache security groups
        cache_security_groups = []
        for sg in self.data['CacheSecurityGroups']:
            cache_security_groups.append(sg['CacheSecurityGroupName'])
        if set(cache_security_groups) - set(self.cache_security_groups):
            return True

        # Check VPC security groups
        vpc_security_groups = []
        security_groups = self.data['SecurityGroups'] or []
        for sg in security_groups:
            vpc_security_groups.append(sg['SecurityGroupId'])
        if set(vpc_security_groups) - set(self.security_group_ids):
            return True

        return False

    def _requires_destroy_and_create(self):
        """
        Check whether a destroy and create is required to synchronize cluster.
        """
        unmodifiable_data = {
            'node_type': self.data['CacheNodeType'],
            'engine': self.data['Engine'],
            'cache_port': self._get_port()
        }
        # Only check the zone if one was specified
        if self.zone is not None:
            unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
        for key, value in unmodifiable_data.iteritems():
            if getattr(self, key) != value:
                return True
        return False

    def _get_elasticache_connection(self):
        """Get an elasticache connection"""
        try:
            endpoint = "elasticache.%s.amazonaws.com" % self.region
            connect_region = RegionInfo(name=self.region, endpoint=endpoint)
            return ElastiCacheConnection(
                region=connect_region,
                **self.aws_connect_kwargs
            )
        except boto.exception.NoAuthHandlerFound, e:
            self.module.fail_json(msg=e.message)

    def _get_port(self):
        """Get the port. Where this information is retrieved from is engine dependent."""
        if self.data['Engine'] == 'memcached':
            return self.data['ConfigurationEndpoint']['Port']
        elif self.data['Engine'] == 'redis':
            # Redis only supports a single node (presently) so just use
            # the first and only
            return self.data['CacheNodes'][0]['Endpoint']['Port']

    def _refresh_data(self, cache_cluster_data=None):
        """Refresh data about this cache cluster"""
        if cache_cluster_data is None:
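            # Any failure to describe the cluster is treated as the cluster not existing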
            try:
                response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
                                                             show_cache_node_info=True)
            except boto.exception.BotoServerError:
                self.data = None
                self.status = 'gone'
                return
            cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0]
        self.data = cache_cluster_data
        self.status = self.data['CacheClusterStatus']

        # The documentation for elasticache lies -- status on rebooting is set
        # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
        # here to make status checks etc. more sane.
        if self.status == 'rebooting cache cluster nodes':
            self.status = 'rebooting'

    def _get_nodes_to_remove(self):
        """Figure out which cache nodes need to be removed, if any"""
        num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
        if num_nodes_to_remove <= 0:
            return None

        if not self.hard_modify:
            msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
            self.module.fail_json(msg=msg % self.name)

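        # Drop nodes from the end of the current node list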
        cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
        return cache_node_ids[-num_nodes_to_remove:]


def main():
    argument_spec = ec2_argument_spec()
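    # Sentinel used to detect whether cache_security_groups was explicitly
    # set by the user or left at its default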
    default = object()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
        name={'required': True},
        engine={'required': False, 'default': 'memcached'},
        cache_engine_version={'required': False, 'default': '1.4.14'},
        node_type={'required': False, 'default': 'cache.m1.small'},
        num_nodes={'required': False, 'default': None, 'type': 'int'},
        cache_port={'required': False, 'default': 11211, 'type': 'int'},
        cache_subnet_group={'required': False, 'default': None},
        cache_security_groups={'required': False, 'default': [default],
                               'type': 'list'},
        security_group_ids={'required': False, 'default': [],
                            'type': 'list'},
        zone={'required': False, 'default': None},
        wait={'required': False, 'type': 'bool', 'default': True},
        hard_modify={'required': False, 'type': 'bool', 'default': False}
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    name = module.params['name']
    state = module.params['state']
    engine = module.params['engine']
    cache_engine_version = module.params['cache_engine_version']
    node_type = module.params['node_type']
    num_nodes = module.params['num_nodes']
    cache_port = module.params['cache_port']
    cache_subnet_group = module.params['cache_subnet_group']
    cache_security_groups = module.params['cache_security_groups']
    security_group_ids = module.params['security_group_ids']
    zone = module.params['zone']
    wait = module.params['wait']
    hard_modify = module.params['hard_modify']

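    # A cache subnet group implies a VPC cluster; cache security groups only
    # apply outside a VPC, so drop the implicit default but reject an explicit clash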
    if cache_subnet_group and cache_security_groups == [default]:
        cache_security_groups = []
    if cache_subnet_group and cache_security_groups:
        module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")

    if cache_security_groups == [default]:
        cache_security_groups = ['default']

    if state == 'present' and not num_nodes:
        module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")

    if not region:
        module.fail_json(msg="Either the region parameter, the AWS_REGION or EC2_REGION environment variable, or the boto config value aws_region or ec2_region must be set.")

    elasticache_manager = ElastiCacheManager(module, name, engine,
                                             cache_engine_version, node_type,
                                             num_nodes, cache_port,
                                             cache_subnet_group,
                                             cache_security_groups,
                                             security_group_ids, zone, wait,
                                             hard_modify, region, **aws_connect_kwargs)

    if state == 'present':
        elasticache_manager.ensure_present()
    elif state == 'absent':
        elasticache_manager.ensure_absent()
    elif state == 'rebooted':
        elasticache_manager.ensure_rebooted()

    facts_result = dict(changed=elasticache_manager.changed,
                        elasticache=elasticache_manager.get_info())

    module.exit_json(**facts_result)


# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()