---
# tasks file for ecs_cluster
# Integration tests for ecs_cluster / ecs_service / ecs_taskdefinition (incl. Fargate).
# All AWS resources are created inside the block and destroyed in the always: section.
- block:

    # ============================================================
    - name: set up aws connection info
      set_fact:
        # Anchor is aliased into every module call below via `<<: *aws_connection_info`.
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: yes

    - name: ensure IAM instance role exists
      iam_role:
        name: ecsInstanceRole
        assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}"
        state: present
        create_instance_profile: yes
        managed_policy:
          - AmazonEC2ContainerServiceforEC2Role
        <<: *aws_connection_info

    - name: ensure IAM service role exists
      iam_role:
        name: ecsServiceRole
        assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
        state: present
        create_instance_profile: no
        managed_policy:
          - AmazonEC2ContainerServiceRole
        <<: *aws_connection_info

    - name: ensure AWSServiceRoleForECS role exists
      iam_role_info:
        name: AWSServiceRoleForECS
        <<: *aws_connection_info
      register: iam_role_result

    # FIXME: come up with a way to automate this
    - name: fail if AWSServiceRoleForECS role does not exist
      fail:
        msg: >
          Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create
          a linked role for AWS VPC load balancer management
      when: not iam_role_result.iam_roles

    - name: create an ECS cluster
      ecs_cluster:
        name: "{{ ecs_cluster_name }}"
        state: present
        <<: *aws_connection_info
      register: ecs_cluster

    - name: check that ecs_cluster changed
      assert:
        that:
          - ecs_cluster.changed

    - name: create same ECS cluster (should do nothing)
      ecs_cluster:
        name: "{{ ecs_cluster_name }}"
        state: present
        <<: *aws_connection_info
      register: ecs_cluster_again

    - name: check that ecs_cluster did not change
      assert:
        that:
          - not ecs_cluster_again.changed

    - name: create a VPC to work in
      ec2_vpc_net:
        cidr_block: 10.0.0.0/16
        state: present
        name: '{{ resource_prefix }}_ecs_cluster'
        resource_tags:
          Name: '{{ resource_prefix }}_ecs_cluster'
        <<: *aws_connection_info
      register: setup_vpc

    - name: create a key pair to use for creating an ec2 instance
      ec2_key:
        name: '{{ resource_prefix }}_ecs_cluster'
        state: present
        <<: *aws_connection_info
      when: ec2_keypair is not defined  # allow override in cloud-config-aws.ini
      register: setup_key

    - name: create subnets
      ec2_vpc_subnet:
        az: '{{ ec2_region }}{{ item.zone }}'
        tags:
          Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}'
        vpc_id: '{{ setup_vpc.vpc.id }}'
        cidr: "{{ item.cidr }}"
        state: present
        <<: *aws_connection_info
      register: setup_subnet
      with_items:
        - zone: a
          cidr: 10.0.1.0/24
        - zone: b
          cidr: 10.0.2.0/24

    - name: create an internet gateway so that ECS agents can talk to ECS
      ec2_vpc_igw:
        vpc_id: '{{ setup_vpc.vpc.id }}'
        state: present
        <<: *aws_connection_info
      register: igw

    - name: create a security group to use for creating an ec2 instance
      ec2_group:
        name: '{{ resource_prefix }}_ecs_cluster-sg'
        description: 'created by Ansible integration tests'
        state: present
        vpc_id: '{{ setup_vpc.vpc.id }}'
        rules:  # allow all ssh traffic but nothing else
          - ports: 22
            cidr: 0.0.0.0/0
        <<: *aws_connection_info
      register: setup_sg

    - name: find a suitable AMI
      ec2_ami_info:
        owner: amazon
        filters:
          description: "Amazon Linux AMI* ECS *"
        <<: *aws_connection_info
      register: ec2_ami_info

    - name: set image id fact
      set_fact:
        ecs_image_id: "{{ (ec2_ami_info.images|first).image_id }}"

    - name: provision ec2 instance to create an image
      ec2_instance:
        key_name: '{{ ec2_keypair|default(setup_key.key.name) }}'
        instance_type: t2.micro
        state: present
        image_id: '{{ ecs_image_id }}'
        wait: yes
        user_data: "{{ user_data }}"
        instance_role: ecsInstanceRole
        tags:
          Name: '{{ resource_prefix }}_ecs_agent'
        security_group: '{{ setup_sg.group_id }}'
        vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}'
        <<: *aws_connection_info
      register: setup_instance

    - name: create target group
      elb_target_group:
        name: "{{ ecs_target_group_name }}1"
        state: present
        protocol: HTTP
        port: 8080
        modify_targets: no
        vpc_id: '{{ setup_vpc.vpc.id }}'
        target_type: instance
        <<: *aws_connection_info
      register: elb_target_group_instance

    - name: create second target group to use ip target_type
      elb_target_group:
        name: "{{ ecs_target_group_name }}2"
        state: present
        protocol: HTTP
        port: 8080
        modify_targets: no
        vpc_id: '{{ setup_vpc.vpc.id }}'
        target_type: ip
        <<: *aws_connection_info
      register: elb_target_group_ip

    - name: create load balancer
      elb_application_lb:
        name: "{{ ecs_load_balancer_name }}"
        state: present
        scheme: internal
        security_groups: '{{ setup_sg.group_id }}'
        subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
        listeners:
          - Protocol: HTTP
            Port: 80
            DefaultActions:
              - Type: forward
                TargetGroupName: "{{ ecs_target_group_name }}1"
          - Protocol: HTTP
            Port: 81
            DefaultActions:
              - Type: forward
                TargetGroupName: "{{ ecs_target_group_name }}2"
        <<: *aws_connection_info

    - name: create task definition
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}"
        state: present
        <<: *aws_connection_info
      register: ecs_task_definition

    - name: recreate task definition
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}"
        state: present
        <<: *aws_connection_info
      register: ecs_task_definition_again

    - name: check that task definition does not change
      assert:
        that:
          - not ecs_task_definition_again.changed
      # FIXME: task definition should not change, will need #26752 or equivalent
      ignore_errors: yes

    - name: obtain ECS task definition facts
      ecs_taskdefinition_info:
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        <<: *aws_connection_info

    - name: create ECS service definition
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        role: "ecsServiceRole"
        <<: *aws_connection_info
      register: ecs_service

    - name: check that ECS service creation changed
      assert:
        that:
          - ecs_service.changed

    - name: create same ECS service definition (should not change)
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        role: "ecsServiceRole"
        <<: *aws_connection_info
      register: ecs_service_again

    - name: check that ECS service recreation changed nothing
      assert:
        that:
          - not ecs_service_again.changed
      # FIXME: service should not change, needs fixing
      ignore_errors: yes

    # FIXME: attempt to update service load balancer
    - name: update ECS service definition (expected to fail)
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port|int + 1 }}"
        role: "ecsServiceRole"
        <<: *aws_connection_info
      register: update_ecs_service
      ignore_errors: yes

    - name: assert that updating ECS load balancer failed with helpful message
      assert:
        that:
          - update_ecs_service is failed
          - "'error' not in update_ecs_service"
          - "'msg' in update_ecs_service"

    - name: attempt to use ECS network configuration on task definition without awsvpc network_mode
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}3"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        network_configuration:
          subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
          security_groups:
            - '{{ setup_sg.group_id }}'
        <<: *aws_connection_info
      register: ecs_service_network_without_awsvpc_task
      ignore_errors: yes

    - name: assert that using ECS network configuration with non AWSVPC task definition fails
      assert:
        that:
          - ecs_service_network_without_awsvpc_task is failed

    - name: scale down ECS service
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 0
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        role: "ecsServiceRole"
        <<: *aws_connection_info
      register: ecs_service_scale_down

    - name: pause to allow service to scale down
      pause:
        seconds: 60

    - name: delete ECS service definition
      ecs_service:
        state: absent
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        <<: *aws_connection_info
      register: delete_ecs_service

    # NOTE(review): this assert appeared twice back-to-back in the original; the
    # exact duplicate was removed.
    - name: assert that deleting ECS service worked
      assert:
        that:
          - delete_ecs_service.changed

    - name: create VPC-networked task definition with host port set to 0 (expected to fail)
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        state: present
        network_mode: awsvpc
        <<: *aws_connection_info
      register: ecs_task_definition_vpc_no_host_port
      ignore_errors: yes

    - name: check that awsvpc task definition with host port 0 fails gracefully
      assert:
        that:
          - ecs_task_definition_vpc_no_host_port is failed
          - "'error' not in ecs_task_definition_vpc_no_host_port"

    - name: create VPC-networked task definition with host port set to 8080
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        network_mode: awsvpc
        state: present
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      register: ecs_task_definition_vpc_with_host_port

    - name: obtain ECS task definition facts
      ecs_taskdefinition_info:
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
        <<: *aws_connection_info
      register: ecs_taskdefinition_info

    - name: assert that network mode is awsvpc
      assert:
        that:
          - "ecs_taskdefinition_info.network_mode == 'awsvpc'"

    - name: pause to allow service to scale down
      pause:
        seconds: 60

    - name: delete ECS service definition
      ecs_service:
        state: absent
        name: "{{ ecs_service_name }}4"
        cluster: "{{ ecs_cluster_name }}"
        <<: *aws_connection_info
      register: delete_ecs_service

    - name: create ECS service definition with network configuration
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}2"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        network_configuration:
          subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
          security_groups:
            - '{{ setup_sg.group_id }}'
        <<: *aws_connection_info
      register: create_ecs_service_with_vpc

    - name: assert that network configuration is correct
      assert:
        that:
          - "'networkConfiguration' in create_ecs_service_with_vpc.service"
          - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration"
          - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
          - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"

    - name: create dummy group to update ECS service with
      ec2_group:
        name: "{{ resource_prefix }}-ecs-vpc-test-sg"
        description: "Test security group for ECS with VPC"
        vpc_id: '{{ setup_vpc.vpc.id }}'
        state: present
        <<: *aws_connection_info

    - name: update ECS service definition with new network configuration
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}2"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        network_configuration:
          subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
          security_groups:
            - "{{ resource_prefix }}-ecs-vpc-test-sg"
        <<: *aws_connection_info
      register: update_ecs_service_with_vpc

    - name: check that ECS service changed
      assert:
        that:
          - update_ecs_service_with_vpc.changed
          - "'networkConfiguration' in update_ecs_service_with_vpc.service"
          - "'awsvpcConfiguration' in update_ecs_service_with_vpc.service.networkConfiguration"
          - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
          - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"

    - name: create ecs_service using health_check_grace_period_seconds
      ecs_service:
        name: "{{ ecs_service_name }}-mft"
        cluster: "{{ ecs_cluster_name }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        scheduling_strategy: "REPLICA"
        health_check_grace_period_seconds: 10
        desired_count: 1
        state: present
        <<: *aws_connection_info
      register: ecs_service_creation_hcgp

    - name: health_check_grace_period_seconds sets HealthChecGracePeriodSeconds
      assert:
        that:
          - ecs_service_creation_hcgp.changed
          - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 10"

    - name: update ecs_service using health_check_grace_period_seconds
      ecs_service:
        name: "{{ ecs_service_name }}-mft"
        cluster: "{{ ecs_cluster_name }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        health_check_grace_period_seconds: 30
        state: present
        <<: *aws_connection_info
      register: ecs_service_creation_hcgp2
      ignore_errors: no

    - name: check that module returns success
      assert:
        that:
          - ecs_service_creation_hcgp2.changed
          - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 30"

    # until ansible supports service registries, this test can't run.
    # - name: update ecs_service using service_registries
    #   ecs_service:
    #     name: "{{ ecs_service_name }}-service-registries"
    #     cluster: "{{ ecs_cluster_name }}"
    #     load_balancers:
    #       - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
    #         containerName: "{{ ecs_task_name }}"
    #         containerPort: "{{ ecs_task_container_port }}"
    #     service_registries:
    #       - containerName: "{{ ecs_task_name }}"
    #         containerPort: "{{ ecs_task_container_port }}"
    #         ### TODO: Figure out how to get a service registry ARN without a service registry module.
    #         registryArn: "{{ ecs_task_service_registry_arn }}"
    #     task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
    #     desired_count: 1
    #     state: present
    #     <<: *aws_connection_info
    #   register: ecs_service_creation_sr
    #   ignore_errors: yes

    # - name: dump sr output
    #   debug: var=ecs_service_creation_sr

    # - name: check that module returns success
    #   assert:
    #     that:
    #       - ecs_service_creation_sr.changed

    - name: update ecs_service using REPLICA scheduling_strategy
      ecs_service:
        name: "{{ ecs_service_name }}-replica"
        cluster: "{{ ecs_cluster_name }}"
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        scheduling_strategy: "REPLICA"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 1
        state: present
        <<: *aws_connection_info
      register: ecs_service_creation_replica

    - name: obtain facts for all ECS services in the cluster
      ecs_service_facts:
        cluster: "{{ ecs_cluster_name }}"
        details: yes
        events: no
        <<: *aws_connection_info
      register: ecs_service_facts

    - name: assert that facts are useful
      assert:
        that:
          - "'services' in ecs_service_facts"
          - ecs_service_facts.services | length > 0
          - "'events' not in ecs_service_facts.services[0]"

    - name: obtain facts for existing service in the cluster
      ecs_service_facts:
        cluster: "{{ ecs_cluster_name }}"
        service: "{{ ecs_service_name }}"
        details: yes
        events: no
        <<: *aws_connection_info
      register: ecs_service_facts

    - name: assert that existing service is available and running
      assert:
        that:
          - "ecs_service_facts.services|length == 1"
          - "ecs_service_facts.services_not_running|length == 0"

    - name: obtain facts for non-existent service in the cluster
      ecs_service_facts:
        cluster: "{{ ecs_cluster_name }}"
        service: madeup
        details: yes
        events: no
        <<: *aws_connection_info
      register: ecs_service_facts

    - name: assert that non-existent service is missing
      assert:
        that:
          - "ecs_service_facts.services_not_running[0].reason == 'MISSING'"

    - name: obtain specific ECS service facts
      ecs_service_facts:
        service: "{{ ecs_service_name }}2"
        cluster: "{{ ecs_cluster_name }}"
        details: yes
        <<: *aws_connection_info
      register: ecs_service_facts

    - name: check that facts contain network configuration
      assert:
        that:
          - "'networkConfiguration' in ecs_service_facts.ansible_facts.services[0]"

    - name: attempt to get facts from missing task definition
      ecs_taskdefinition_info:
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1}}"
        <<: *aws_connection_info

    # ============================================================
    # Begin tests for Fargate

    - name: ensure AmazonECSTaskExecutionRolePolicy exists
      iam_role:
        name: ecsTaskExecutionRole
        assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
        description: "Allows ECS containers to make calls to ECR"
        state: present
        create_instance_profile: no
        managed_policy:
          - AmazonEC2ContainerServiceRole
        <<: *aws_connection_info
      register: iam_execution_role

    - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail)
      ecs_taskdefinition:
        containers: "{{ ecs_fargate_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        network_mode: bridge
        launch_type: FARGATE
        cpu: 512
        memory: 1024
        state: present
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      ignore_errors: yes
      register: ecs_fargate_task_definition_bridged_with_host_port

    - name: check that fargate task definition with bridged networking fails gracefully
      assert:
        that:
          - ecs_fargate_task_definition_bridged_with_host_port is failed
          - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"'

    - name: create Fargate VPC-networked task definition without CPU or Memory (expected to Fail)
      ecs_taskdefinition:
        containers: "{{ ecs_fargate_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        network_mode: awsvpc
        launch_type: FARGATE
        state: present
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_fargate_task_definition_vpc_no_mem

    - name: check that fargate task definition without memory or cpu fails gracefully
      assert:
        that:
          - ecs_fargate_task_definition_vpc_no_mem is failed
          - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"'

    - name: create Fargate VPC-networked task definition with CPU or Memory and execution role
      ecs_taskdefinition:
        containers: "{{ ecs_fargate_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        network_mode: awsvpc
        launch_type: FARGATE
        cpu: 512
        memory: 1024
        execution_role_arn: "{{ iam_execution_role.arn }}"
        state: present
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      register: ecs_fargate_task_definition

    - name: obtain ECS task definition facts
      ecs_taskdefinition_info:
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
        <<: *aws_connection_info

    - name: create fargate ECS service without network config (expected to fail)
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}4"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        launch_type: FARGATE
        <<: *aws_connection_info
      register: ecs_fargate_service_network_without_awsvpc
      ignore_errors: yes

    - name: assert that using Fargate ECS service fails
      assert:
        that:
          - ecs_fargate_service_network_without_awsvpc is failed

    - name: create fargate ECS service with network config
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}4"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
        desired_count: 1
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        launch_type: FARGATE
        network_configuration:
          subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
          security_groups:
            - '{{ setup_sg.group_id }}'
          assign_public_ip: true
        <<: *aws_connection_info
      register: ecs_fargate_service_network_with_awsvpc

    - name: create fargate ECS task with run task
      ecs_task:
        operation: run
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc"
        launch_type: FARGATE
        count: 1
        network_configuration:
          subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
          security_groups:
            - '{{ setup_sg.group_id }}'
          assign_public_ip: true
        started_by: ansible_user
        <<: *aws_connection_info
      register: fargate_run_task_output

    - name: assert that public IP assignment is enabled
      assert:
        that:
          - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"'

    # ============================================================
    # End tests for Fargate

    - name: create task definition for absent with arn regression test
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}-absent"
        state: present
        <<: *aws_connection_info
      register: ecs_task_definition_absent_with_arn

    - name: absent task definition by arn
      ecs_taskdefinition:
        arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}"
        state: absent
        <<: *aws_connection_info

  always:
    # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
    - name: Announce teardown start
      debug:
        msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"

    - name: obtain ECS service facts
      ecs_service_facts:
        service: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        details: yes
        <<: *aws_connection_info
      register: ecs_service_facts

    - name: scale down ECS service
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_service_facts.ansible_facts.services[0].taskDefinition }}"
        desired_count: 0
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ ecs_service_facts.ansible_facts.services[0].loadBalancers[0].targetGroupArn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_scale_down

    - name: obtain second ECS service facts
      ecs_service_facts:
        service: "{{ ecs_service_name }}2"
        cluster: "{{ ecs_cluster_name }}"
        details: yes
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_facts

    - name: scale down second ECS service
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}2"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_service_facts.ansible_facts.services[0].taskDefinition }}"
        desired_count: 0
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        placement_strategy: "{{ ecs_service_placement_strategy }}"
        load_balancers:
          - targetGroupArn: "{{ ecs_service_facts.ansible_facts.services[0].loadBalancers[0].targetGroupArn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_scale_down

    - name: scale down multifunction-test service
      ecs_service:
        name: "{{ ecs_service_name }}-mft"
        cluster: "{{ ecs_cluster_name }}"
        state: present
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 0
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_scale_down

    - name: scale down scheduling_strategy service
      ecs_service:
        name: "{{ ecs_service_name }}-replica"
        cluster: "{{ ecs_cluster_name }}"
        state: present
        load_balancers:
          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
            containerName: "{{ ecs_task_name }}"
            containerPort: "{{ ecs_task_container_port }}"
        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
        desired_count: 0
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_scale_down

    # until ansible supports service registries, the test for it can't run and this
    # scale down is not needed
    # - name: scale down service_registries service
    #   ecs_service:
    #     name: "{{ ecs_service_name }}-service-registries"
    #     cluster: "{{ ecs_cluster_name }}"
    #     state: present
    #     load_balancers:
    #       - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
    #         containerName: "{{ ecs_task_name }}"
    #         containerPort: "{{ ecs_task_container_port }}"
    #     task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
    #     desired_count: 0
    #     <<: *aws_connection_info
    #   ignore_errors: yes
    #   register: ecs_service_scale_down

    - name: scale down Fargate ECS service
      ecs_service:
        state: present
        name: "{{ ecs_service_name }}4"
        cluster: "{{ ecs_cluster_name }}"
        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
        desired_count: 0
        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_service_scale_down

    - name: stop Fargate ECS task
      ecs_task:
        task: "{{ fargate_run_task_output.task[0].taskArn }}"
        task_definition: "{{ ecs_task_name }}-vpc"
        operation: stop
        cluster: "{{ ecs_cluster_name }}"
        <<: *aws_connection_info
      ignore_errors: yes

    - name: pause to allow services to scale down
      pause:
        seconds: 60
      when: ecs_service_scale_down is not failed

    - name: remove ecs service
      ecs_service:
        state: absent
        cluster: "{{ ecs_cluster_name }}"
        name: "{{ ecs_service_name }}"
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove second ecs service
      ecs_service:
        state: absent
        cluster: "{{ ecs_cluster_name }}"
        name: "{{ ecs_service_name }}2"
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove mft ecs service
      ecs_service:
        state: absent
        cluster: "{{ ecs_cluster_name }}"
        name: "{{ ecs_service_name }}-mft"
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove scheduling_strategy ecs service
      ecs_service:
        state: absent
        cluster: "{{ ecs_cluster_name }}"
        name: "{{ ecs_service_name }}-replica"
        <<: *aws_connection_info
      ignore_errors: yes

    # until ansible supports service registries, the test for it can't run and this
    # removal is not needed
    # - name: remove service_registries ecs service
    #   ecs_service:
    #     state: absent
    #     cluster: "{{ ecs_cluster_name }}"
    #     name: "{{ ecs_service_name }}-service-registries"
    #     <<: *aws_connection_info
    #   ignore_errors: yes

    - name: remove fargate ECS service
      ecs_service:
        state: absent
        name: "{{ ecs_service_name }}4"
        cluster: "{{ ecs_cluster_name }}"
        <<: *aws_connection_info
      ignore_errors: yes
      register: ecs_fargate_service_network_with_awsvpc

    - name: remove ecs task definition
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}"
        revision: "{{ ecs_task_definition.taskdefinition.revision }}"
        state: absent
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      ignore_errors: yes

    - name: remove ecs task definition again
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}"
        revision: "{{ ecs_task_definition_again.taskdefinition.revision }}"
        state: absent
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      ignore_errors: yes

    - name: remove second ecs task definition
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
        state: absent
        <<: *aws_connection_info
      vars:
        ecs_task_host_port: 8080
      ignore_errors: yes

    - name: remove fargate ecs task definition
      ecs_taskdefinition:
        containers: "{{ ecs_fargate_task_containers }}"
        family: "{{ ecs_task_name }}-vpc"
        revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}"
        state: absent
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove ecs task definition for absent with arn
      ecs_taskdefinition:
        containers: "{{ ecs_task_containers }}"
        family: "{{ ecs_task_name }}-absent"
        revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}"
        state: absent
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove load balancer
      elb_application_lb:
        name: "{{ ecs_load_balancer_name }}"
        state: absent
        wait: yes
        <<: *aws_connection_info
      ignore_errors: yes
      register: elb_application_lb_remove

    - name: pause to allow target group to be disassociated
      pause:
        seconds: 30
      when: not elb_application_lb_remove is failed

    - name: remove target groups
      elb_target_group:
        name: "{{ item }}"
        state: absent
        <<: *aws_connection_info
      with_items:
        - "{{ ecs_target_group_name }}1"
        - "{{ ecs_target_group_name }}2"
      ignore_errors: yes

    - name: remove setup ec2 instance
      ec2_instance:
        instance_ids: '{{ setup_instance.instance_ids }}'
        state: absent
        wait: yes
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove setup keypair
      ec2_key:
        name: '{{ resource_prefix }}_ecs_cluster'
        state: absent
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove security groups
      ec2_group:
        name: '{{ item }}'
        description: 'created by Ansible integration tests'
        state: absent
        vpc_id: '{{ setup_vpc.vpc.id }}'
        <<: *aws_connection_info
      with_items:
        - "{{ resource_prefix }}-ecs-vpc-test-sg"
        - '{{ resource_prefix }}_ecs_cluster-sg'
      ignore_errors: yes

    - name: remove IGW
      ec2_vpc_igw:
        state: absent
        vpc_id: '{{ setup_vpc.vpc.id }}'
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove setup subnet
      ec2_vpc_subnet:
        az: '{{ aws_region }}{{ item.zone }}'
        vpc_id: '{{ setup_vpc.vpc.id }}'
        cidr: "{{ item.cidr}}"
        state: absent
        <<: *aws_connection_info
      with_items:
        - zone: a
          cidr: 10.0.1.0/24
        - zone: b
          cidr: 10.0.2.0/24
      ignore_errors: yes

    - name: remove setup VPC
      ec2_vpc_net:
        cidr_block: 10.0.0.0/16
        state: absent
        name: '{{ resource_prefix }}_ecs_cluster'
        <<: *aws_connection_info
      ignore_errors: yes

    - name: remove ECS cluster
      ecs_cluster:
        name: "{{ ecs_cluster_name }}"
        state: absent
        <<: *aws_connection_info
      ignore_errors: yes