AWS Redshift: port module to boto3 and fix parameters check (#37052)

* fix parameters check and port module to boto3

* begin with integration tests

* allow redshift iam policy

* Wait for cluster to be created before moving on to delete it

* Allow sts credentials so this can be run in CI

Don't log credentials

ensure cluster can be removed

* - Replace DIY waiters with boto3 waiters (see the sketch after this list)
- test multi-node cluster

* catch specific boto3 error codes

* remove wait from test

* add missing alias for shippable

* - Rework modify function.
- Default unavailable parameters to none.
- Add cluster modify test

* Ensure resources are cleaned up if tests fail

* Ensure all botocore ClientError and BotoCoreError exceptions are handled
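
A minimal sketch of the waiter and error-code patterns referred to in the bullets above, assuming a plain boto3 Redshift client (the module itself obtains its client through AnsibleAWSModule); the identifier, credentials and timeout below are placeholder values:

import boto3

from ansible.module_utils.aws.core import is_boto3_error_code

client = boto3.client('redshift')   # the module uses module.client('redshift') instead
identifier = 'example-cluster'      # placeholder cluster identifier
wait_timeout = 300                  # seconds, matching the module default

try:
    client.describe_clusters(ClusterIdentifier=identifier)
except is_boto3_error_code('ClusterNotFound'):
    # only this specific error code means "cluster does not exist yet";
    # any other ClientError or BotoCoreError propagates to the caller
    client.create_cluster(ClusterIdentifier=identifier,
                          NodeType='dc2.large',
                          MasterUsername='master_user',
                          MasterUserPassword='Th1s_is_a_test')

# boto3 waiter instead of a hand-rolled sleep/poll loop; Redshift waiters
# poll roughly once a minute, so MaxAttempts approximates the timeout
waiter = client.get_waiter('cluster_available')
waiter.wait(
    ClusterIdentifier=identifier,
    WaiterConfig=dict(MaxAttempts=wait_timeout // 60),
)
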
Rafael Driutti 2019-02-21 18:04:42 -05:00 committed by Sloane Hertel
parent 5e3e0eb946
commit c68838fb13
6 changed files with 458 additions and 113 deletions


@@ -0,0 +1,20 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowRedshiftManagment",
"Action": [
"redshift:CreateCluster",
"redshift:CreateTags",
"redshift:DeleteCluster",
"redshift:DeleteTags",
"redshift:DescribeClusters",
"redshift:DescribeTags",
"redshift:ModifyCluster",
"redshift:RebootCluster"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
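
For reference, a sketch of registering this document as a managed IAM policy with boto3 so it can be attached to the CI user or role; the file path and policy name are assumptions, not part of the commit:

import boto3

# path to the policy document added in this commit (the filename is assumed)
with open('redshift-policy.json') as f:
    policy_document = f.read()

iam = boto3.client('iam')
response = iam.create_policy(
    PolicyName='AnsibleRedshiftIntegrationTest',   # hypothetical policy name
    PolicyDocument=policy_document,
)
print(response['Policy']['Arn'])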


@@ -4,8 +4,8 @@
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
@@ -16,6 +16,7 @@ DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
- "Rafael Driutti (@rafaeldriutti)"
module: redshift
version_added: "2.2"
short_description: create, delete, or modify an Amazon Redshift instance
@@ -123,15 +124,15 @@ options:
aliases: ['new_identifier']
wait:
description:
- When command=create, modify or restore then wait for the database to enter the 'available' state.
When command=delete wait for the database to be terminated.
type: bool
default: 'no'
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ 'boto' ]
requirements: [ 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
@@ -181,7 +182,7 @@ cluster:
type: str
sample: "new_db_name"
availability_zone:
description: Amazon availability zone where the cluster is located.
description: Amazon availability zone where the cluster is located. "None" until cluster is available.
returned: success
type: str
sample: "us-east-1b"
@@ -196,54 +197,68 @@ cluster:
type: str
sample: "10.10.10.10"
public_ip_address:
description: Public IP address of the main node.
description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled.
returned: success
type: str
sample: "0.0.0.0"
port:
description: Port of the cluster.
description: Port of the cluster. "None" until cluster is available.
returned: success
type: int
sample: 5439
url:
description: FQDN of the main cluster node.
description: FQDN of the main cluster node. "None" until cluster is available.
returned: success
type: str
sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
enhanced_vpc_routing:
description: status of the enhanced vpc routing feature.
returned: success
type: boolean
'''
import time
try:
import boto.exception
import boto.redshift
import botocore
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
pass # handled by AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, snake_dict_to_camel_dict
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
def _collect_facts(resource):
"""Transfrom cluster information to dict."""
facts = {
'identifier': resource['ClusterIdentifier'],
'create_time': resource['ClusterCreateTime'],
'status': resource['ClusterStatus'],
'username': resource['MasterUsername'],
'db_name': resource['DBName'],
'availability_zone': resource['AvailabilityZone'],
'maintenance_window': resource['PreferredMaintenanceWindow'],
'url': resource['Endpoint']['Address'],
'port': resource['Endpoint']['Port']
'enhanced_vpc_routing': resource['EnhancedVpcRouting']
}
for node in resource['ClusterNodes']:
if node['NodeRole'] in ('SHARED', 'LEADER'):
facts['private_ip_address'] = node['PrivateIPAddress']
if facts['enhanced_vpc_routing'] is False:
facts['public_ip_address'] = node['PublicIPAddress']
else:
facts['public_ip_address'] = None
break
# Some parameters are not ready instantly if you don't wait for available
# cluster status
facts['create_time'] = None
facts['url'] = None
facts['port'] = None
facts['availability_zone'] = None
if resource['ClusterStatus'] != "creating":
facts['create_time'] = resource['ClusterCreateTime']
facts['url'] = resource['Endpoint']['Address']
facts['port'] = resource['Endpoint']['Port']
facts['availability_zone'] = resource['AvailabilityZone']
return facts
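
To illustrate the "default unavailable parameters to None" change, a small sketch of what _collect_facts returns for a cluster that is still being created; the input below is a made-up, minimal DescribeClusters entry:

resource = {
    'ClusterIdentifier': 'example-cluster',
    'ClusterStatus': 'creating',
    'MasterUsername': 'master_user',
    'DBName': 'dev',
    'PreferredMaintenanceWindow': 'sun:05:00-sun:05:30',
    'EnhancedVpcRouting': False,
    'ClusterNodes': [],   # no nodes reported yet while the cluster is creating
}

facts = _collect_facts(resource)
# endpoint-related facts stay None until the cluster reaches 'available'
assert facts['status'] == 'creating'
assert facts['url'] is None and facts['port'] is None
assert facts['create_time'] is None and facts['availability_zone'] is None
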
@@ -261,13 +276,14 @@ def create_cluster(module, redshift):
node_type = module.params.get('node_type')
username = module.params.get('username')
password = module.params.get('password')
d_b_name = module.params.get('db_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
changed = True
# Package up the optional parameters
params = {}
for p in ('db_name', 'cluster_type', 'cluster_security_groups',
for p in ('cluster_type', 'cluster_security_groups',
'vpc_security_group_ids', 'cluster_subnet_group_name',
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
@@ -275,37 +291,41 @@ def create_cluster(module, redshift):
'cluster_version', 'allow_version_upgrade',
'number_of_nodes', 'publicly_accessible',
'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
if p in module.params:
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
params[p] = module.params.get(p)
if d_b_name:
params['d_b_name'] = d_b_name
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
changed = False
except boto.exception.JSONResponseError as e:
except is_boto3_error_code('ClusterNotFound'):
try:
redshift.create_cluster(identifier, node_type, username, password, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
redshift.create_cluster(ClusterIdentifier=identifier,
NodeType=node_type,
MasterUsername=username,
MasterUserPassword=password,
**snake_dict_to_camel_dict(params, capitalize_first=True))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to create cluster")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to describe cluster")
if wait:
attempts = wait_timeout // 60
waiter = redshift.get_waiter('cluster_available')
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
waiter.wait(
ClusterIdentifier=identifier,
WaiterConfig=dict(MaxAttempts=attempts)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")
try:
resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to describe cluster")
return(changed, _collect_facts(resource))
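
The optional parameters are camelized only after dropping None values, because boto3 rejects arguments passed explicitly as None (the boto3 issue #400 linked in the comments), and db_name is repackaged as d_b_name so the helper emits the DBName key boto3 expects rather than DbName. A small sketch of that conversion, assuming the helper imported above:

from ansible.module_utils.ec2 import snake_dict_to_camel_dict

params = {
    'number_of_nodes': 3,
    'publicly_accessible': None,    # option the user left unset
    'd_b_name': 'integration_test',
}

# mirror the module's loop: skip parameters that are None
params = dict((k, v) for k, v in params.items() if v is not None)

print(snake_dict_to_camel_dict(params, capitalize_first=True))
# -> {'NumberOfNodes': 3, 'DBName': 'integration_test'}
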
@@ -320,9 +340,9 @@ def describe_cluster(module, redshift):
identifier = module.params.get('identifier')
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error describing cluster")
return(True, _collect_facts(resource))
@@ -338,32 +358,35 @@ def delete_cluster(module, redshift):
identifier = module.params.get('identifier')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
params = {}
for p in ('skip_final_cluster_snapshot',
'final_cluster_snapshot_identifier'):
if p in module.params:
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
params[p] = module.params.get(p)
try:
redshift.delete_cluster(
identifier,
skip_final_cluster_snapshot,
final_cluster_snapshot_identifier
ClusterIdentifier=identifier,
**snake_dict_to_camel_dict(params, capitalize_first=True)
)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
except is_boto3_error_code('ClusterNotFound'):
return(False, {})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to delete cluster")
if wait:
attempts = wait_timeout // 60
waiter = redshift.get_waiter('cluster_deleted')
try:
wait_timeout = time.time() + wait_timeout
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
waiter.wait(
ClusterIdentifier=identifier,
WaiterConfig=dict(MaxAttempts=attempts)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout deleting the cluster")
return(True, {})
@@ -387,39 +410,55 @@ def modify_cluster(module, redshift):
'availability_zone', 'preferred_maintenance_window',
'cluster_parameter_group_name',
'automated_snapshot_retention_period', 'port', 'cluster_version',
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier',
'enhanced_vpc_routing'):
if p in module.params:
'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
# https://github.com/boto/boto3/issues/400
if module.params.get(p) is not None:
params[p] = module.params.get(p)
# enhanced_vpc_routing parameter change needs an exclusive request
if module.params.get('enhanced_vpc_routing') is not None:
try:
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
redshift.modify_cluster(ClusterIdentifier=identifier,
EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
if wait:
attempts = wait_timeout // 60
waiter = redshift.get_waiter('cluster_available')
try:
redshift.modify_cluster(identifier, **params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
waiter.wait(
ClusterIdentifier=identifier,
WaiterConfig=dict(MaxAttempts=attempts)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e,
msg="Timeout waiting for cluster enhanced vpc routing modification"
)
# change the rest
try:
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
redshift.modify_cluster(ClusterIdentifier=identifier,
**snake_dict_to_camel_dict(params, capitalize_first=True))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
if module.params.get('new_cluster_identifier'):
identifier = module.params.get('new_cluster_identifier')
if wait:
attempts = wait_timeout // 60
waiter2 = redshift.get_waiter('cluster_available')
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
except boto.exception.JSONResponseError as e:
# https://github.com/boto/boto/issues/2776 is fixed.
module.fail_json(msg=str(e))
waiter2.wait(
ClusterIdentifier=identifier,
WaiterConfig=dict(MaxAttempts=attempts)
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
try:
resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
return(True, _collect_facts(resource))
@@ -429,22 +468,24 @@ def main():
argument_spec.update(dict(
command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier=dict(required=True),
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large',
'dc2.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
'dw2.8xlarge'], required=False),
node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
'ds2.8xlarge', 'dc1.large', 'dc2.large',
'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
'dw2.large', 'dw2.8xlarge'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
cluster_security_groups=dict(aliases=['security_groups'], type='list'),
vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'], type='bool', default=False),
skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
type='bool', default=False),
final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
cluster_subnet_group_name=dict(aliases=['subnet']),
availability_zone=dict(aliases=['aws_zone', 'zone']),
preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
cluster_parameter_group_name=dict(aliases=['param_group_name']),
automated_snapshot_retention_period=dict(aliases=['retention_period']),
automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
port=dict(type='int'),
cluster_version=dict(aliases=['version'], choices=['1.0']),
allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
@@ -460,28 +501,24 @@
required_if = [
('command', 'delete', ['skip_final_cluster_snapshot']),
('skip_final_cluster_snapshot', False, ['final_cluster_snapshot_identifier'])
('command', 'create', ['node_type',
'username',
'password'])
]
module = AnsibleModule(
module = AnsibleAWSModule(
argument_spec=argument_spec,
required_if=required_if
)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
command = module.params.get('command')
skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
# can't use module basic required_if check for this case
if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
module.fail_json(msg="Need to specifiy final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))
# connect to the rds endpoint
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
conn = module.client('redshift')
changed = True
if command == 'create':


@@ -0,0 +1,3 @@
cloud/aws
posix/ci/cloud/group4/aws
shippable/aws/group1


@@ -0,0 +1,6 @@
---
# defaults file for test_redshift
redshift_cluster_name: '{{ resource_prefix }}'
reshift_master_password: "th1s_is_A_test"
redshift_master_username: "master_user"
node_type: "dc2.large"


@@ -0,0 +1,3 @@
dependencies:
- prepare_tests
- setup_ec2


@@ -0,0 +1,276 @@
---
# A Note about ec2 environment variable name preference:
# - EC2_URL -> AWS_URL
# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
# - EC2_REGION -> AWS_REGION
#
- block:
- name: set connection information for all tasks
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token | default(omit) }}"
region: "{{ aws_region }}"
no_log: yes
# ============================================================
- name: test failure with no parameters
redshift:
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure with no parameters
assert:
that:
- 'result.failed'
- 'result.msg == "missing required arguments: command, identifier"'
# ============================================================
- name: test failure with only identifier
redshift:
identifier: '{{ redshift_cluster_name }}'
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure with only identifier
assert:
that:
- 'result.failed'
- 'result.msg == "missing required arguments: command"'
# ============================================================
- name: test create with no identifier
redshift:
command: create
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure with no identifier
assert:
that:
- 'result.failed'
- 'result.msg == "missing required arguments: identifier"'
# ============================================================
- name: test create with missing node_type
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure with missing node_type
assert:
that:
- 'result.failed'
- 'result.msg == "command is create but all of the following are missing: node_type, username, password"'
# ============================================================
- name: test create with missing password
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
username: "{{ redshift_master_username }}"
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert create failure with missing password
assert:
that:
- 'result.failed'
- 'result.msg == "command is create but all of the following are missing: node_type, password"'
# ============================================================
- name: test create with missing username
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
password: "{{ reshift_master_password }}"
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert create failure with missing username
assert:
that:
- 'result.failed'
- 'result.msg == "command is create but all of the following are missing: node_type, username"'
# ============================================================
- name: test create with default params
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
username: "{{ redshift_master_username }}"
password: "{{ reshift_master_password }}"
node_type: "{{ node_type }}"
wait: yes
wait_timeout: 1000
<<: *aws_connection_info
register: result
- debug:
msg: "{{ result }}"
verbosity: 1
- name: assert create success
assert:
that:
- 'result.changed'
- 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
# ============================================================
- name: test create again with default params
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
username: "{{ redshift_master_username }}"
password: "{{ reshift_master_password }}"
node_type: "{{ node_type }}"
<<: *aws_connection_info
register: result
- name: assert no change gets made to the existing cluster
assert:
that:
- 'not result.changed'
- 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
# ============================================================
- name: test modify cluster
redshift:
command: modify
identifier: "{{ redshift_cluster_name }}"
new_cluster_identifier: "{{ redshift_cluster_name }}-modified"
enhanced_vpc_routing: True
wait: yes
wait_timeout: 1000
<<: *aws_connection_info
register: result
- name: assert cluster was modified
assert:
that:
- 'result.changed'
- 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
- 'result.cluster.enhanced_vpc_routing == True'
# ============================================================
- name: test delete with no cluster identifier
redshift:
command: delete
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure with no identifier
assert:
that:
- 'result.failed'
- 'result.msg == "missing required arguments: identifier"'
# ============================================================
- name: test delete with no snapshot id
redshift:
command: delete
identifier: "{{ redshift_cluster_name }}"
<<: *aws_connection_info
register: result
ignore_errors: true
- name: assert failure for no snapshot identifier
assert:
that:
- 'result.failed'
- 'result.msg == "Need to specifiy final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"'
# ============================================================
- name: test successful delete
redshift:
command: delete
identifier: "{{ redshift_cluster_name }}-modified"
skip_final_cluster_snapshot: true
wait: yes
wait_timeout: 1200
<<: *aws_connection_info
register: result
- name: assert delete
assert:
that:
- 'result.changed'
# ============================================================
- name: test create multi-node cluster with custom db-name
redshift:
command: create
identifier: "{{ redshift_cluster_name }}"
username: "{{ redshift_master_username }}"
password: "{{ reshift_master_password }}"
node_type: "{{ node_type }}"
cluster_type: multi-node
number_of_nodes: 3
wait: yes
db_name: "integration_test"
wait_timeout: 1800
<<: *aws_connection_info
register: result
- name: assert create
assert:
that:
- 'result.changed'
- 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
- 'result.cluster.db_name == "integration_test"'
# ============================================================
- name: test successful delete of multi-node cluster
redshift:
command: delete
identifier: "{{ redshift_cluster_name }}"
skip_final_cluster_snapshot: true
wait: yes
wait_timeout: 1200
<<: *aws_connection_info
register: result
- name: assert delete
assert:
that:
- 'result.changed'
always:
- name: Remove cluster if tests failed
redshift:
command: delete
identifier: "{{ item }}"
skip_final_cluster_snapshot: true
wait: yes
wait_timeout: 1200
<<: *aws_connection_info
register: cleanup
ignore_errors: yes
retries: 10
delay: 10
until: cleanup is success
loop:
- "{{ redshift_cluster_name }}"
- "{{ redshift_cluster_name }}-modified"