aws_eks_cluster: Add wait functionality (#42259)
* aws_eks_cluster: Improve output documentation This data is already returned by the module, it just wasn't documented. These fields are required for accessing the created Kubernetes API with e.g. the k8s_raw module. * aws_eks_cluster: Add wait functionality This enables further cluster configuration once it's created and active. 20 minutes was chosen as an arbitrary default, so that if it takes longer than the documented "usually less than 10 minutes" it's still likely to succeed. * Correct security group name in aws_eks tests * Improve teardown of aws_eks tests Fix minor teardown issues. The `pause` step is a placeholder until a waiter for `state: absent` is available.
This commit is contained in:
parent
8590465d96
commit
6412cbf84b
4 changed files with 125 additions and 4 deletions
|
@ -179,6 +179,30 @@ waf_data = {
|
|||
}
|
||||
}
|
||||
|
||||
# Hand-rolled botocore waiter configuration for EKS, needed because the
# installed botocore may predate a built-in EKS "cluster active" waiter.
# Schema follows botocore's waiter-config format (version 2).
eks_data = {
    "version": 2,
    "waiters": {
        # Polls DescribeCluster every 20s, up to 60 times (20 minutes total),
        # succeeding once cluster.status reports ACTIVE. A
        # ResourceNotFoundException is retried rather than treated as failure,
        # since the cluster may not be visible immediately after creation.
        "ClusterActive": {
            "delay": 20,
            "maxAttempts": 60,
            "operation": "DescribeCluster",
            "acceptors": [
                {
                    "state": "success",
                    "matcher": "path",
                    "argument": "cluster.status",
                    "expected": "ACTIVE",
                },
                {
                    "state": "retry",
                    "matcher": "error",
                    "expected": "ResourceNotFoundException",
                },
            ],
        },
    },
}
|
||||
|
||||
|
||||
def ec2_model(name):
|
||||
ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
|
||||
|
@ -190,6 +214,11 @@ def waf_model(name):
|
|||
return waf_models.get_waiter(name)
|
||||
|
||||
|
||||
def eks_model(name):
    """Build a WaiterModel from the EKS waiter config and return the named waiter.

    :param name: waiter name as declared in ``eks_data`` (e.g. ``ClusterActive``)
    :return: the botocore single-waiter model for ``name``
    """
    model = core_waiter.WaiterModel(waiter_config=eks_data)
    return model.get_waiter(name)
|
||||
|
||||
|
||||
waiters_by_name = {
|
||||
('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
|
||||
'route_table_exists',
|
||||
|
@ -251,6 +280,12 @@ waiters_by_name = {
|
|||
core_waiter.NormalizedOperationMethod(
|
||||
waf.get_change_token_status
|
||||
)),
|
||||
('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
|
||||
'cluster_active',
|
||||
eks_model('ClusterActive'),
|
||||
core_waiter.NormalizedOperationMethod(
|
||||
eks.describe_cluster
|
||||
)),
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -36,6 +36,17 @@ options:
|
|||
- absent
|
||||
- present
|
||||
default: present
|
||||
wait:
|
||||
description: >-
|
||||
Specifies whether the module waits until the cluster becomes active after
|
||||
creation. It takes "usually less than 10 minutes" per AWS documentation.
|
||||
type: bool
|
||||
default: 'no'
|
||||
wait_timeout:
|
||||
description: >-
|
||||
The duration in seconds to wait for the cluster to become active. Defaults
|
||||
to 1200 seconds (20 minutes).
|
||||
default: 1200
|
||||
|
||||
|
||||
requirements: [ 'botocore', 'boto3' ]
|
||||
|
@ -72,10 +83,19 @@ arn:
|
|||
type: string
|
||||
sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster
|
||||
certificate_authority:
|
||||
description: Certificate Authority Data for cluster
|
||||
description: Dictionary containing Certificate Authority Data for cluster
|
||||
returned: after creation
|
||||
type: complex
|
||||
contains: {}
|
||||
contains:
|
||||
data:
|
||||
description: Base-64 encoded Certificate Authority Data for cluster
|
||||
returned: when the cluster has been created and is active
|
||||
type: string
|
||||
endpoint:
|
||||
description: Kubernetes API server endpoint
|
||||
returned: when the cluster has been created and is active
|
||||
type: string
|
||||
sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com
|
||||
created_at:
|
||||
description: Cluster creation date and time
|
||||
returned: when state is present
|
||||
|
@ -120,7 +140,9 @@ status:
|
|||
description: status of the EKS cluster
|
||||
returned: when state is present
|
||||
type: string
|
||||
sample: CREATING
|
||||
sample:
|
||||
- CREATING
|
||||
- ACTIVE
|
||||
version:
|
||||
description: Kubernetes version of the cluster
|
||||
returned: when state is present
|
||||
|
@ -131,6 +153,7 @@ version:
|
|||
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
|
||||
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
|
||||
from ansible.module_utils.aws.waiters import get_waiter
|
||||
|
||||
try:
|
||||
import botocore.exceptions
|
||||
|
@ -142,6 +165,7 @@ def ensure_present(client, module):
|
|||
name = module.params.get('name')
|
||||
subnets = module.params['subnets']
|
||||
groups = module.params['security_groups']
|
||||
wait = module.params.get('wait')
|
||||
cluster = get_cluster(client, module)
|
||||
try:
|
||||
ec2 = module.client('ec2')
|
||||
|
@ -157,6 +181,13 @@ def ensure_present(client, module):
|
|||
module.fail_json(msg="Cannot modify security groups of existing cluster")
|
||||
if module.params.get('version') and module.params.get('version') != cluster['version']:
|
||||
module.fail_json(msg="Cannot modify version of existing cluster")
|
||||
|
||||
if wait:
|
||||
wait_until_cluster_active(client, module)
|
||||
# Ensure that fields that are only available for active clusters are
|
||||
# included in the returned value
|
||||
cluster = get_cluster(client, module)
|
||||
|
||||
module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
|
||||
|
||||
if module.check_mode:
|
||||
|
@ -175,6 +206,13 @@ def ensure_present(client, module):
|
|||
module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
|
||||
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
|
||||
|
||||
if wait:
|
||||
wait_until_cluster_active(client, module)
|
||||
# Ensure that fields that are only available for active clusters are
|
||||
# included in the returned value
|
||||
cluster = get_cluster(client, module)
|
||||
|
||||
module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
|
||||
|
||||
|
||||
|
@ -205,6 +243,15 @@ def get_cluster(client, module):
|
|||
module.fail_json(e, msg="Couldn't get cluster %s" % name)
|
||||
|
||||
|
||||
def wait_until_cluster_active(client, module):
    """Block until the EKS cluster named in module params reaches ACTIVE.

    Scales the waiter's maximum attempts so that the total wait honours the
    user-supplied ``wait_timeout`` (the waiter polls every ``config.delay``
    seconds). Fails the module cleanly if the cluster does not become active
    in time, instead of letting the WaiterError surface as a traceback.

    :param client: boto3 EKS client
    :param module: AnsibleAWSModule instance (reads ``name``/``wait_timeout``)
    """
    name = module.params.get('name')
    wait_timeout = module.params.get('wait_timeout')

    waiter = get_waiter(client, 'cluster_active')
    # +1 so a timeout shorter than one delay interval still polls once
    attempts = 1 + int(wait_timeout / waiter.config.delay)
    try:
        waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
    except botocore.exceptions.WaiterError as e:
        # Report a clean module failure rather than an unhandled exception
        module.fail_json_aws(e, msg="Timed out waiting for cluster %s to become active" % name)
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
|
@ -213,6 +260,8 @@ def main():
|
|||
subnets=dict(type='list'),
|
||||
security_groups=dict(type='list'),
|
||||
state=dict(choices=['absent', 'present'], default='present'),
|
||||
wait=dict(default=False, type='bool'),
|
||||
wait_timeout=dict(default=1200, type='int')
|
||||
)
|
||||
|
||||
module = AnsibleAWSModule(
|
||||
|
|
|
@ -21,7 +21,7 @@ eks_security_groups:
|
|||
from_port: 1025
|
||||
to_port: 65535
|
||||
proto: tcp
|
||||
- name: "{{ eks_cluster_name }}-worker-sg"
|
||||
- name: "{{ eks_cluster_name }}-workers-sg"
|
||||
description: "EKS Worker Security Group"
|
||||
rules:
|
||||
- group_name: "{{ eks_cluster_name }}-workers-sg"
|
||||
|
|
|
@ -106,6 +106,27 @@
|
|||
- eks_create is changed
|
||||
- eks_create.name == eks_cluster_name
|
||||
|
||||
- name: create EKS cluster with same details but wait for it to become active
|
||||
aws_eks_cluster:
|
||||
name: "{{ eks_cluster_name }}"
|
||||
security_groups: "{{ eks_security_groups | json_query('[].name') }}"
|
||||
subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
|
||||
role_arn: "{{ iam_role.arn }}"
|
||||
wait: yes
|
||||
<<: *aws_connection_info
|
||||
register: eks_create
|
||||
|
||||
- name: Check that EKS cluster is active and has CA and endpoint data
|
||||
assert:
|
||||
that:
|
||||
- eks_create is not changed
|
||||
- eks_create.name == eks_cluster_name
|
||||
- eks_create.status == "ACTIVE"
|
||||
- eks_create.certificate_authority.data is defined
|
||||
- eks_create.certificate_authority.data != ""
|
||||
- eks_create.endpoint is defined
|
||||
- eks_create.endpoint != ""
|
||||
|
||||
- name: create EKS cluster with same details but using SG ids
|
||||
aws_eks_cluster:
|
||||
name: "{{ eks_cluster_name }}"
|
||||
|
@ -146,6 +167,9 @@
|
|||
register: eks_delete
|
||||
ignore_errors: yes
|
||||
|
||||
- pause:
|
||||
minutes: 5
|
||||
|
||||
- debug:
|
||||
msg: "{{ eks_security_groups|reverse|list }}"
|
||||
|
||||
|
@ -154,6 +178,19 @@
|
|||
additional_eks_sg:
|
||||
- name: "{{ eks_cluster_name }}-workers-sg"
|
||||
|
||||
- name: set all security group rule lists to empty to remove circular dependency
|
||||
ec2_group:
|
||||
name: "{{ item.name }}"
|
||||
description: "{{ item.description }}"
|
||||
state: present
|
||||
rules: []
|
||||
rules_egress: []
|
||||
purge_rules: yes
|
||||
purge_rules_egress: yes
|
||||
vpc_id: '{{ setup_vpc.vpc.id }}'
|
||||
<<: *aws_connection_info
|
||||
with_items: "{{ eks_security_groups }}"
|
||||
|
||||
- name: remove security groups
|
||||
ec2_group:
|
||||
name: '{{ item.name }}'
|
||||
|
|
Loading…
Reference in a new issue