PEP 8 E111 cleanup. (#20841)
commit 23f2efbc8d
parent 462ab6b1db
7 changed files with 55 additions and 56 deletions
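
E111 is the pycodestyle check for "indentation is not a multiple of four". Every code hunk in this commit is a whitespace-only re-indent, so the hunks below are shown as the re-indented result; the single content change is the entry removed from the pep8 legacy-files list at the end, which is why the stats show 55 additions against 56 deletions. As a hypothetical illustration (not taken from this commit), an E111 fix looks like this:

# before: flagged by E111, bodies indented three spaces
def is_even(n):
   if n % 2 == 0:
      return True
   return False

# after: bodies re-indented to multiples of four; behavior unchanged
def is_even(n):
    if n % 2 == 0:
        return True
    return False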
lib/ansible/modules/cloud/amazon/ec2_elb.py

@@ -257,7 +257,7 @@ class ElbManager:
           are attached to self.instance_id"""

        if not ec2_elbs:
            ec2_elbs = self._get_auto_scaling_group_lbs()

        try:
            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
@@ -293,24 +293,24 @@ class ElbManager:
           indirectly through its auto scaling group membership"""

        try:
            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
        if len(asg_instances) > 1:
            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")

        if not asg_instances:
            asg_elbs = []
        else:
            asg_name = asg_instances[0].group_name

            asgs = asg.get_all_groups([asg_name])
            if len(asg_instances) != 1:
                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")

            asg_elbs = asgs[0].load_balancers

        return asg_elbs
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py

@@ -577,13 +577,13 @@ class ElbManager(object):

        # instance state counts: InService or OutOfService
        if info['instance_health']:
            for instance_state in info['instance_health']:
                if instance_state['state'] == "InService":
                    info['in_service_count'] += 1
                elif instance_state['state'] == "OutOfService":
                    info['out_of_service_count'] += 1
                else:
                    info['unknown_instance_state_count'] += 1

        if check_elb.health_check:
            info['health_check'] = {
@@ -940,7 +940,7 @@ class ElbManager(object):
        attributes = self.elb.get_attributes()
        if self.access_logs:
            if 's3_location' not in self.access_logs:
                self.module.fail_json(msg='s3_location information required')

            access_logs_config = {
                "enabled": True,
@@ -951,7 +951,7 @@ class ElbManager(object):

        update_access_logs_config = False
        for attr, desired_value in access_logs_config.items():
            if getattr(attributes.access_log, attr) != desired_value:
                setattr(attributes.access_log, attr, desired_value)
                update_access_logs_config = True
        if update_access_logs_config:
lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py

@@ -119,13 +119,13 @@ result:
'''

try:
    import json
    import time
    import botocore
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

def get_vgw_info(vgws):
    if not isinstance(vgws, list):
lib/ansible/modules/cloud/amazon/route53.py

@@ -469,9 +469,9 @@ def main():

    if command_in == 'create':
        if ( weight_in is not None or region_in is not None or failover_in is not None ) and identifier_in is None:
            module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
        elif ( weight_in is None and region_in is None and failover_in is None ) and identifier_in is not None:
            module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
@@ -536,15 +536,15 @@ def main():
            if hosted_zone_id_in:
                record['hosted_zone_id'] = hosted_zone_id_in
            if rset.alias_dns_name:
                record['alias'] = True
                record['value'] = rset.alias_dns_name
                record['values'] = [rset.alias_dns_name]
                record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
                record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
            else:
                record['alias'] = False
                record['value'] = ','.join(sorted(rset.resource_records))
                record['values'] = sorted(rset.resource_records)
            if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
                module.exit_json(changed=False)
            break
@@ -578,9 +578,9 @@ def main():
        txt = e.body.split("<Message>")[1]
        txt = txt.split("</Message>")[0]
        if "but it already exists" in txt:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg = txt)
    except TimeoutError:
        module.fail_json(msg='Timeout waiting for changes to replicate')
|
@ -306,12 +306,12 @@ def main():
|
|||
|
||||
# Default port
|
||||
if port_in is None:
|
||||
if type_in in ['HTTP', 'HTTP_STR_MATCH']:
|
||||
port_in = 80
|
||||
elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
|
||||
port_in = 443
|
||||
else:
|
||||
module.fail_json(msg="parameter 'port' is required for 'type' TCP")
|
||||
if type_in in ['HTTP', 'HTTP_STR_MATCH']:
|
||||
port_in = 80
|
||||
elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
|
||||
port_in = 443
|
||||
else:
|
||||
module.fail_json(msg="parameter 'port' is required for 'type' TCP")
|
||||
|
||||
# string_match in relation with type
|
||||
if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
|
||||
|
|
|
@ -615,21 +615,21 @@ def main():
|
|||
|
||||
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
|
||||
if bucketrtn is True and keyrtn is True:
|
||||
md5_remote = keysum(module, s3, bucket, obj)
|
||||
md5_local = module.md5(src)
|
||||
md5_remote = keysum(module, s3, bucket, obj)
|
||||
md5_local = module.md5(src)
|
||||
|
||||
if md5_local == md5_remote:
|
||||
sum_matches = True
|
||||
if overwrite == 'always':
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
|
||||
else:
|
||||
get_download_url(module, s3, bucket, obj, expiry, changed=False)
|
||||
if md5_local == md5_remote:
|
||||
sum_matches = True
|
||||
if overwrite == 'always':
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
|
||||
else:
|
||||
sum_matches = False
|
||||
if overwrite in ('always', 'different'):
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
|
||||
else:
|
||||
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
|
||||
get_download_url(module, s3, bucket, obj, expiry, changed=False)
|
||||
else:
|
||||
sum_matches = False
|
||||
if overwrite in ('always', 'different'):
|
||||
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
|
||||
else:
|
||||
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
|
||||
|
||||
# If neither exist (based on bucket existence), we can create both.
|
||||
if bucketrtn is False and pathrtn is True:
|
||||
|
|
|
test/sanity/pep8/legacy-files.txt

@@ -61,7 +61,6 @@ lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
lib/ansible/modules/cloud/amazon/ec2_win_password.py
lib/ansible/modules/cloud/amazon/ecs_service.py
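
The file list above is Ansible's pep8 sanity whitelist of modules not yet PEP 8 clean; a module is deleted from the list (the "-" line) once it passes, so future violations fail the sanity test. A minimal sketch for re-checking one of these modules for E111 with the pycodestyle API (assuming pycodestyle is installed; any path from the list works):

import pycodestyle

# Limit the run to E111 so unrelated legacy warnings are ignored.
style = pycodestyle.StyleGuide(select=['E111'])
report = style.check_files(['lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py'])
print(report.total_errors)  # 0 once the module is E111 clean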