parent d0d1158c5e
commit e2c0b375d3
23 changed files with 51 additions and 57 deletions
@@ -525,7 +525,7 @@ class AnsibleDockerClient(Client):
 msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
 "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
 "You may also use TLS without verification by setting the tls parameter to true." \
-% (self.auth_params['tls_hostname'], match.group(1))
+% (self.auth_params['tls_hostname'], match.group(1))
 self.fail(msg)
 self.fail("SSL Exception: %s" % (error))

@@ -330,7 +330,7 @@ class AnsibleDockerClient(Client):
 msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
 "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
 "You may also use TLS without verification by setting the tls parameter to true." \
-% (self.auth_params['tls_hostname'], match.group(1))
+% (self.auth_params['tls_hostname'], match.group(1))
 self.fail(msg)
 self.fail("SSL Exception: %s" % (error))

@@ -297,7 +297,7 @@ def main():

 # First check if we were given a dhcp_options_id
 if not params['dhcp_options_id']:
-# No, so create new_options from the parameters
+# No, so create new_options from the parameters
 if params['dns_servers'] is not None:
 new_options['domain-name-servers'] = params['dns_servers']
 if params['netbios_name_servers'] is not None:

@@ -387,8 +387,8 @@ def find_vgw(client, module, vpn_gateway_id=None):

 def ensure_vgw_present(client, module):

-# If an existing vgw name and type matches our args, then a match is considered to have been
-# found and we will not create another vgw.
+# If an existing vgw name and type matches our args, then a match is considered to have been
+# found and we will not create another vgw.

 changed = False
 params = dict()

@@ -472,8 +472,8 @@ def ensure_vgw_present(client, module):

 def ensure_vgw_absent(client, module):

-# If an existing vgw name and type matches our args, then a match is considered to have been
-# found and we will take steps to delete it.
+# If an existing vgw name and type matches our args, then a match is considered to have been
+# found and we will take steps to delete it.

 changed = False
 params = dict()

@@ -314,8 +314,8 @@ def main():
 results['task']=existing
 else:
 if not module.check_mode:
-# it exists, so we should delete it and mark changed.
-# return info about the cluster deleted
+# it exists, so we should delete it and mark changed.
+# return info about the cluster deleted
 results['task'] = service_mgr.stop_task(
 module.params['cluster'],
 module.params['task']

@@ -145,7 +145,7 @@ def main():
 exists = len(matching_groups) > 0
 except boto.exception.JSONResponseError as e:
 if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
-#if e.code != 'ClusterSubnetGroupNotFoundFault':
+# if e.code != 'ClusterSubnetGroupNotFoundFault':
 module.fail_json(msg=str(e))

 if state == 'absent':

@@ -158,17 +158,17 @@ def main():
 new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
 group = {
 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
-['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+['ClusterSubnetGroup']['ClusterSubnetGroupName'],
 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
-['ClusterSubnetGroup']['VpcId'],
+['ClusterSubnetGroup']['VpcId'],
 }
 else:
 changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
 group = {
 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
-['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+['ClusterSubnetGroup']['ClusterSubnetGroupName'],
 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
-['ClusterSubnetGroup']['VpcId'],
+['ClusterSubnetGroup']['VpcId'],
 }

 changed = True

@@ -601,7 +601,7 @@ def main():
 if mode == 'put':

 # Use this snippet to debug through conditionals:
-# module.exit_json(msg="Bucket return %s"%bucketrtn)
+# module.exit_json(msg="Bucket return %s"%bucketrtn)

 # Lets check the src path.
 pathrtn = path_check(src)

@@ -450,12 +450,12 @@ def main():
 except Exception as e:
 module.fail_json(msg=unexpected_error_msg(e), changed=False)
 if network:
-# json_output['d4'] = 'deleting %s' % name
+# json_output['d4'] = 'deleting %s' % name
 try:
 gce.ex_destroy_network(network)
 except Exception as e:
 module.fail_json(msg=unexpected_error_msg(e), changed=False)
-# json_output['d5'] = 'deleted %s' % name
+# json_output['d5'] = 'deleted %s' % name
 changed = True

 json_output['changed'] = changed

@@ -181,12 +181,12 @@ def main():
 pem_file=dict(type='path'),
 project_id=dict(),
 ),
-mutually_exclusive=[
+mutually_exclusive=[
 [ 'instance_name', 'instance_pattern' ]
-],
+],
 required_one_of=[
 [ 'instance_name', 'instance_pattern' ]
-]
+]
 )

 instance_name = module.params.get('instance_name')

@@ -296,21 +296,21 @@ def load_mongocnf():


 def check_if_roles_changed(uinfo, roles, db_name):
-# We must be aware of users which can read the oplog on a replicaset
-# Such users must have access to the local DB, but since this DB does not store users credentials
-# and is not synchronized among replica sets, the user must be stored on the admin db
-# Therefore their structure is the following :
-# {
-# "_id" : "admin.oplog_reader",
-# "user" : "oplog_reader",
-# "db" : "admin", # <-- admin DB
-# "roles" : [
-# {
-# "role" : "read",
-# "db" : "local" # <-- local DB
-# }
-# ]
-# }
+# We must be aware of users which can read the oplog on a replicaset
+# Such users must have access to the local DB, but since this DB does not store users credentials
+# and is not synchronized among replica sets, the user must be stored on the admin db
+# Therefore their structure is the following :
+# {
+# "_id" : "admin.oplog_reader",
+# "user" : "oplog_reader",
+# "db" : "admin", # <-- admin DB
+# "roles" : [
+# {
+# "role" : "read",
+# "db" : "local" # <-- local DB
+# }
+# ]
+# }

 def make_sure_roles_are_a_list_of_dict(roles, db_name):
 output = list()

@@ -635,7 +635,7 @@ class TgzArchive(object):

 for filename in out.splitlines():
 # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
-# filename = filename.decode('string_escape')
+# filename = filename.decode('string_escape')
 filename = codecs.escape_decode(filename)[0]
 if filename and filename not in self.excludes:
 self._files_in_archive.append(to_native(filename))

|
@ -299,8 +299,7 @@ def create_maintenance(auth_headers, url, statuspage, host_ids,
|
|||
"statuspage_id": statuspage,
|
||||
"components": component_id,
|
||||
"containers": container_id,
|
||||
"all_infrastructure_affected":
|
||||
str(int(all_infrastructure_affected)),
|
||||
"all_infrastructure_affected": str(int(all_infrastructure_affected)),
|
||||
"automation": str(int(automation)),
|
||||
"maintenance_name": title,
|
||||
"maintenance_details": desc,
|
||||
|
|
|
@ -638,8 +638,8 @@ def main():
|
|||
('type','NS',['value']),
|
||||
('type','SPF',['value'])
|
||||
]
|
||||
),
|
||||
required_one_of = (
|
||||
),
|
||||
required_one_of = (
|
||||
[['record','value','type']]
|
||||
)
|
||||
)
|
||||
|
|
|
@@ -419,8 +419,7 @@ def validate_feature(module, mode='show'):
 'ethernet-link-oam': 'elo',
 'port-security': 'eth_port_sec'
 },
-'config':
-{
+'config': {
 'nve': 'nv overlay',
 'vnseg_vlan': 'vn-segment-vlan-based',
 'hsrp_engine': 'hsrp',

@@ -435,7 +434,7 @@ def validate_feature(module, mode='show'):
 'elo': 'ethernet-link-oam',
 'eth_port_sec': 'port-security'
-}
+}
 }

 if feature in feature_to_be_mapped[mode]:
 feature = feature_to_be_mapped[mode][feature]

@@ -193,7 +193,7 @@ def main():
 authkey=dict(required=False),
 privkey=dict(required=False),
 removeplaceholder=dict(required=False)),
-required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
+required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
 supports_check_mode=False)

 m_args = module.params

@@ -313,7 +313,7 @@ def main():
 size_unit = ''

 if not '%' in size:
-# LVCREATE(8) -L --size option unit
+# LVCREATE(8) -L --size option unit
 if size[-1].lower() in 'bskmgtpe':
 size_unit = size[-1].lower()
 size = size[0:-1]

@@ -115,8 +115,8 @@ def main():
 enabled = dict(type='bool'),
 pattern = dict(required=False, default=None),
 ),
-supports_check_mode=True,
-required_one_of=[['state', 'enabled']],
+supports_check_mode=True,
+required_one_of=[['state', 'enabled']],
 )

 # initialize

@@ -267,8 +267,8 @@ def main():
 user = dict(type='bool', default=False),
 no_block = dict(type='bool', default=False),
 ),
-supports_check_mode=True,
-required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
+supports_check_mode=True,
+required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
 )

 systemctl = module.get_bin_path('systemctl')

@@ -208,7 +208,7 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority
 matching_issue_list_len = len(matching_issue_list)

 if matching_issue_list_len == 0:
-# The issue does not exist in the project
+# The issue does not exist in the project
 if state == "present":
 # This implies a change
 changed = True

@@ -226,7 +226,7 @@ def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority
 return (True, changed, "Issue does not exist", {})

 elif matching_issue_list_len == 1:
-# The issue exists in the project
+# The issue exists in the project
 if state == "absent":
 # This implies a change
 changed = True

@@ -98,7 +98,7 @@ class LookupModule(LookupBase):
 except ValueError:
 raise AnsibleError(
 "can't parse arg %s=%r as integer"
-% (arg, arg_raw)
+% (arg, arg_raw)
 )
 if 'format' in args:
 self.format = args.pop("format")

@@ -107,7 +107,6 @@ lib/ansible/modules/cloud/amazon/rds.py
lib/ansible/modules/cloud/amazon/rds_param_group.py
lib/ansible/modules/cloud/amazon/rds_subnet_group.py
lib/ansible/modules/cloud/amazon/redshift.py
lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
lib/ansible/modules/cloud/amazon/route53.py
lib/ansible/modules/cloud/amazon/route53_health_check.py
lib/ansible/modules/cloud/amazon/route53_zone.py

@@ -442,7 +441,6 @@ lib/ansible/plugins/lookup/dnstxt.py
lib/ansible/plugins/lookup/first_found.py
lib/ansible/plugins/lookup/hashi_vault.py
lib/ansible/plugins/lookup/mongodb.py
lib/ansible/plugins/lookup/sequence.py
lib/ansible/plugins/shell/fish.py
lib/ansible/plugins/shell/sh.py
lib/ansible/plugins/strategy/__init__.py

@@ -1,12 +1,10 @@
E111
E114
E115
E121
E122
E125
E126
E129
E131
E501
E712
E721

@@ -147,7 +147,7 @@ CODENAME = Malachite
 "distribution_version": "42.1",
-}
+},
-{
+{
 'name': 'openSUSE 13.2',
 'input': {'/etc/SuSE-release': """openSUSE 13.2 (x86_64)
 VERSION = 13.2