PEP 8 E111 & E114 cleanup. (#20838)

Matt Clay 2017-01-30 15:01:47 -08:00 committed by GitHub
parent 1609afbd12
commit cb76200c7d
119 changed files with 339 additions and 378 deletions
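For context, the pep8/pycodestyle checks referenced in the title are E111 ("indentation is not a multiple of four") and E114 (the same problem on an indented comment line). The sketch below is a hypothetical before/after, not taken from any file in this commit, showing the kind of whitespace-only change the cleanup makes:

# Before: three-space indents trigger E111 on the code lines and E114 on the comment.
def describe(items):
   # summarize the list
   count = len(items)
   return '%d items' % count

# After: re-indented to four spaces; behaviour is unchanged.
def describe(items):
    # summarize the list
    count = len(items)
    return '%d items' % count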

@@ -130,18 +130,18 @@ def generate_inv_from_api(enterprise_entity,config):
if (config.getboolean('defaults', 'public_ip_only')) == True:
for link in vmcollection['links']:
if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
vm_nic = link['title']
break
else:
vm_nic = None
# Otherwise, assigning defined network interface IP address
else:
for link in vmcollection['links']:
if (link['rel']==config.get('defaults', 'default_net_interface')):
vm_nic = link['title']
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed

@@ -735,7 +735,7 @@ class DockerInventory(object):
cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
if cert_path and cert_path == self._env_args.cert_path:
cert_path = os.path.join(cert_path, 'cert.pem')
cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
if cacert_path and cacert_path == self._env_args.cert_path:

@@ -308,13 +308,13 @@ class Ec2Inventory(object):
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:

@@ -151,7 +151,7 @@ from click.exceptions import UsageError
from six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
class NSoTInventory(object):

@@ -169,7 +169,7 @@ class SoftLayerInventory(object):
# Inventory: group by tag
for tag in instance['tagReferences']:
self.push(self.inventory, tag['tag']['name'], dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''

@@ -187,7 +187,7 @@ if options.list:
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %

@@ -24,10 +24,10 @@ except ImportError:
import simplejson as json
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
VBOX="VBoxManage"

@@ -27,10 +27,10 @@ result['all'] = {}
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for l in pipe.stdout.readlines():
# 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
s = l.split(':')
if s[1] != 'global':
result['all']['hosts'].append(s[1])
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'zone'

@@ -36,13 +36,13 @@ if _system_six:
# If we need some things from even newer versions of six, then we need to
# use our bundled copy instead
if ( # Added in six-1.8.0
not hasattr(_system_six.moves, 'shlex_quote') or
# Added in six-1.4.0
not hasattr(_system_six, 'byte2int') or
not hasattr(_system_six, 'add_metaclass') or
not hasattr(_system_six.moves, 'urllib')
):
_system_six = False

@@ -252,7 +252,7 @@ class GalaxyRole(object):
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:

@@ -376,9 +376,9 @@ class AzureRMModuleBase(object):
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ")
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)

@@ -1399,7 +1399,7 @@ class AnsibleModule(object):
','.join(sorted(list(unsupported_parameters))),
','.join(sorted(self.argument_spec.keys()))))
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0

@@ -249,7 +249,7 @@ class AnsibleDockerClient(Client):
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params

@@ -68,7 +68,7 @@ def run_commands(module, commands):
module.fail_json(msg=err)
try:
out = module.from_json(out)
except ValueError:
out = str(out).strip()

@@ -325,7 +325,7 @@ class Facts(object):
def get_pkg_mgr_facts(self):
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
else:
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
@@ -476,9 +476,9 @@ class Facts(object):
def get_apparmor_facts(self):
self.facts['apparmor'] = {}
if os.path.exists('/sys/kernel/security/apparmor'):
self.facts['apparmor']['status'] = 'enabled'
else:
self.facts['apparmor']['status'] = 'disabled'
def get_caps_facts(self):
capsh_path = self.module.get_bin_path('capsh')
@@ -884,7 +884,7 @@ class Distribution(object):
# example pattern are 13.04 13.0 13
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
if release:
@@ -1071,8 +1071,8 @@ class LinuxHardware(Hardware):
self.facts["%s_mb" % key.lower()] = int(val) // 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = int(val) // 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
@@ -1811,8 +1811,8 @@ class OpenBSDHardware(Hardware):
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class FreeBSDHardware(Hardware):
"""
@@ -2040,8 +2040,8 @@ class NetBSDHardware(Hardware):
}
for mib in sysctl_to_dmi:
if mib in self.sysctl:
self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class AIX(Hardware):
"""
@@ -2283,8 +2283,8 @@ class HPUX(Hardware):
if os.access("/dev/kmem", os.R_OK):
rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
@@ -2308,7 +2308,7 @@ class HPUX(Hardware):
self.facts['firmware_version'] = out.split(separator)[1].strip()
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ",use_unsafe_shell=True)
if rc == 0 and out:
self.facts['product_serial'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
@@ -2926,18 +2926,18 @@ class HPUXNetwork(Network):
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
@@ -3122,7 +3122,7 @@ class OpenBSDNetwork(GenericBsdIfconfigNetwork):
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):

@@ -101,7 +101,7 @@ def get_fqdn(repo_url):
return result
def check_hostkey(module, fqdn):
return not not_in_host_file(module, fqdn)
# this is a variant of code found in connection_plugins/paramiko.py and we should modify
# the paramiko code to import and use this.

@@ -226,7 +226,7 @@ def connect_to_api(module, disconnect_atexit=True):
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)

@@ -184,9 +184,9 @@ def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clea
if not dry_run:
statement['Principal']['AWS'].append(role_arn)
elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
changes_needed[granttype] = 'remove'
if not dry_run:
statement['Principal']['AWS'].remove(role_arn)
elif mode == 'deny' and statement['Sid'] == statement_label[granttype] and role_arn in statement['Principal']['AWS']:
# we don't selectively deny. that's a grant with a

@@ -222,7 +222,7 @@ class CloudFormationServiceManager:
result = response.get(result_key)
next_token = response.get('NextToken')
if not next_token:
return result
return result + self.paginated_response(func, result_key, next_token)
def to_dict(items, key, value):
@@ -246,7 +246,7 @@ def main():
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
# Describe the stack
service_mgr = CloudFormationServiceManager(module)

@@ -388,7 +388,7 @@ def main():
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
service_mgr = CloudFrontServiceManager(module)

@@ -139,13 +139,13 @@ class CloudTrailManager:
ret = self.conn.describe_trails(trail_name_list=[name])
trailList = ret.get('trailList', [])
if len(trailList) == 1:
return trailList[0]
return None
def exists(self, name=None):
ret = self.view(name)
if ret:
return True
return False
def enable_logging(self, name):
@@ -180,7 +180,7 @@ def main():
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
ec2_url, access_key, secret_key, region = get_ec2_creds(module)
aws_connect_params = dict(aws_access_key_id=access_key,

@@ -344,7 +344,7 @@ def validate_index(index, module):
module.fail_json(msg='%s is not a valid option for an index' % key)
for required_option in INDEX_REQUIRED_OPTIONS:
if required_option not in index:
module.fail_json(msg='%s is a required option for an index' % required_option)
if index['type'] not in INDEX_TYPE_OPTIONS:
module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))

@@ -1033,7 +1033,7 @@ def create_instances(module, ec2, vpc, override_count=None):
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
# Lookup any instances that much our run id.
@@ -1065,11 +1065,11 @@ def create_instances(module, ec2, vpc, override_count=None):
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
@@ -1184,8 +1184,8 @@ def create_instances(module, ec2, vpc, override_count=None):
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group :
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
# You can't tell spot instances to 'stop'; they will always be
# 'terminate'd. For convenience, we'll ignore the latter value.

@@ -178,7 +178,7 @@ def list_launch_configs(connection, module):
launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
if sort:
snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:

@@ -176,12 +176,12 @@ def main():
module.fail_json(msg="tags argument is required when state is absent")
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
baddict[key] = value
if set(baddict) == set(tags):
module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
for (key, value) in set(tags.items()):
if (key, value) in set(tagdict.items()):
dictremove[key] = value
tagger = ec2.delete_tags(resource, dictremove)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)

@@ -333,7 +333,7 @@ def main():
supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
if len(supplied_options) != 1:
if params['state'] != 'absent':
module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
else:
found = True
dhcp_option = supplied_options[0]

@@ -131,7 +131,7 @@ def list_dhcp_options(client, module):
snaked_dhcp_options_array = []
for dhcp_option in all_dhcp_options_array:
snaked_dhcp_options_array.append(camel_dict_to_snake_dict(dhcp_option))
module.exit_json(dhcp_options=snaked_dhcp_options_array)

@@ -387,7 +387,7 @@ def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
@@ -592,7 +592,7 @@ def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg

@@ -353,10 +353,10 @@ def main():
)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
service_mgr = EcsServiceManager(module)

@@ -217,10 +217,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
show_details = module.params.get('details', False)

@@ -248,10 +248,10 @@ def main():
# Validate Requirements
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
# Validate Inputs
if module.params['operation'] == 'run':

@@ -224,10 +224,10 @@ def main():
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
task_to_describe = None
task_mgr = EcsTaskManager(module)

@@ -314,7 +314,7 @@ def main():
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if len(vpc_subnet_ids) < 1:
module.fail_json(msg='At least 1 subnet is required')
if len(vpc_security_group_ids) < 1:
module.fail_json(msg='At least 1 security group is required')

@@ -352,7 +352,7 @@ class RDSConnection:
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
@@ -438,7 +438,7 @@ class RDS2Connection:
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:

@@ -282,8 +282,8 @@ def gather_files(fileroot, include=None, exclude=None):
if include:
found = False
for x in include.split(','):
if fnmatch.fnmatch(fn, x):
found = True
if not found:
# not on the include list, so we don't want it.
continue
@@ -376,7 +376,7 @@ def filter_list(s3, bucket, s3filelist, strategy):
keeplist = list(s3filelist)
for e in keeplist:
e['_strategy'] = strategy
# init/fetch info from S3 if we're going to use it for comparisons
if not strategy == 'force':

@@ -319,10 +319,10 @@ class SnsTopicManager(object):
def ensure_gone(self):
self.arn_topic = self._arn_topic_lookup()
if self.arn_topic:
self._get_topic_subs()
if self.subscriptions_existing:
self._delete_subscriptions()
self._delete_topic()
def get_info(self):

@@ -258,11 +258,11 @@ try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,

@@ -508,7 +508,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
id=pip.id,
location=pip.location,
resource_guid=pip.resource_guid)
#name=pip.name,
if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])

@@ -605,7 +605,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
self.log("Using image version {0}".format(self.image['version']))
if not self.storage_blob_name:
self.storage_blob_name = self.name + '.vhd'
if self.storage_account_name:
self.get_storage_account(self.storage_account_name)

@@ -286,7 +286,7 @@ class ClcPublicIp(object):
result = None
try:
for ip_address in server.PublicIPs().public_ips:
result = ip_address.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
server.id, ex.response_text

@@ -335,7 +335,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
poll_async = self.module.params.get('poll_async')
if poll_async:
firewall_rule = self.poll_job(res, 'firewallrule')
return firewall_rule
@@ -359,7 +359,7 @@ class AnsibleCloudStackFirewall(AnsibleCloudStack):
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self.poll_job(res, 'firewallrule')
return firewall_rule

@@ -273,7 +273,7 @@ class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
args['projectid'] = self.get_project('id')
sgs = self.cs.listSecurityGroups(**args)
if not sgs or 'securitygroup' not in sgs:
self.module.fail_json(msg="security group '%s' not found" % security_group_name)
return sgs['securitygroup'][0]

@@ -291,8 +291,8 @@ class ImageManager(DockerBaseClass):
# If name contains a tag, it takes precedence over tag parameter.
repo, repo_tag = parse_repository_tag(self.name)
if repo_tag:
self.name = repo
self.tag = repo_tag
if self.state in ['present', 'build']:
self.present()

@@ -520,8 +520,8 @@ def get_stdout(path_name):
new_line = re.sub(r'\x1b\[.+m', '', line.encode('ascii'))
full_stdout += new_line
if new_line.strip():
# Assuming last line contains the error message
last_line = new_line.strip().encode('utf-8')
fd.close()
os.remove(path_name)
return full_stdout, last_line

@@ -332,7 +332,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:

@@ -287,7 +287,7 @@ def main():
json_output['ipv4_range'] = network.cidr
if network and mode == 'custom' and subnet_name:
if not hasattr(gce, 'ex_get_subnetwork'):
module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
json_output['subnet_name'] = subnet_name

@@ -220,7 +220,7 @@ def main():
except ResourceNotFoundError:
module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
# Tag nodes
instance_pattern_matches = []

@@ -626,10 +626,10 @@ class RHEVConn(object):
setFailed()
return False
elif int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
setMsg("Shrinking disks is not supported")
setMsg(str(e))
setFailed()
return False
else:
setMsg("The size of the disk is correct")
if str(DISK.interface) != str(diskinterface):

@@ -154,7 +154,7 @@ def get_vms(session):
vms = change_keys(recs, key='uuid')
for vm in vms.values():
xs_vms[vm['name_label']] = vm
return xs_vms
@@ -165,7 +165,7 @@ def get_srs(session):
return None
srs = change_keys(recs, key='uuid')
for sr in srs.values():
xs_srs[sr['name_label']] = sr
return xs_srs
def main():

@@ -443,14 +443,14 @@ def _create_server(module, nova):
server = nova.servers.create(*bootargs, **bootkwargs)
server = nova.servers.get(server.id)
except Exception as e:
module.fail_json( msg = "Error in creating instance: %s " % e.message)
if module.params['wait'] == 'yes':
expire = time.time() + int(module.params['wait_for'])
while time.time() < expire:
try:
server = nova.servers.get(server.id)
except Exception as e:
module.fail_json( msg = "Error in getting info from instance: %s" % e.message)
if server.status == 'ACTIVE':
server = _add_floating_ip(module, nova, server)
@@ -466,7 +466,7 @@ def _create_server(module, nova):
module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually")
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server.. Please check manually")
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')

@@ -147,7 +147,7 @@ def _get_server_state(module, nova):
server_info = info
break
except Exception as e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):

@@ -267,12 +267,12 @@ def main():
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
if module.params['provider_network_type'] in ['vlan', 'gre']:
if not module.params['provider_segmentation_id']:
module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.")
neutron = _get_neutron_client(module, module.params)

@@ -141,7 +141,7 @@ def _get_router_id(module, neutron):
except Exception as e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_net_id(neutron, module):

@@ -201,7 +201,7 @@ def _get_net_id(neutron, module):
except Exception as e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']

@@ -208,7 +208,7 @@ def _check_set_power_state(module, cloud, node):
if 'power off' in str(node['power_state']):
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
return False
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
module.exit_json(

@@ -100,7 +100,7 @@ def process_object(
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
changed= True
else:
cloud_obj.delete_container(container)

@@ -176,7 +176,7 @@ def _can_update(subnet, module, cloud):
else:
module.fail_json(msg='No network found for %s' % network_name)
if netid != subnet['network_id']:
module.fail_json(msg='Cannot update network_name in existing \
subnet')
if ip_version and subnet['ip_version'] != ip_version:
module.fail_json(msg='Cannot update ip_version in existing subnet')

@@ -500,7 +500,7 @@ def act_on_devices(target_state, module, packet_conn):
created_devices = [create_single_device(module, packet_conn, n)
for n in create_hostnames]
if module.params.get('wait'):
created_devices = wait_for_ips(module, packet_conn, created_devices)
changed = True
processed_devices = created_devices + process_devices

@@ -169,10 +169,10 @@ def get_sshkey_selector(module):
def selector(k):
if 'key' in select_dict:
# if key string is specified, compare only the key strings
return k.key == select_dict['key']
else:
# if key string not specified, all the fields must match
return all([select_dict[f] == getattr(k,f) for f in select_dict])
return selector

@@ -246,4 +246,4 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()

@@ -86,10 +86,10 @@ except ImportError:
def find_vswitch_by_name(host, vswitch_name):
for vss in host.config.network.vswitch:
if vss.name == vswitch_name:
return vss
return None
class VMwareHostVirtualSwitch(object):

@@ -365,19 +365,19 @@ def parse_check(module):
if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('http'):
return ConsulCheck(
module.params.get('check_id'),
module.params.get('check_name'),
module.params.get('check_node'),
module.params.get('check_host'),
module.params.get('script'),
module.params.get('interval'),
module.params.get('ttl'),
module.params.get('notes'),
module.params.get('http'),
module.params.get('timeout'),
module.params.get('service_id'),
)
def parse_service(module):

@@ -123,11 +123,11 @@ class PublicKey(object):
if not os.path.exists(self.path) or self.force:
try:
privatekey_content = open(self.privatekey_path, 'r').read()
privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM, privatekey_content)
publickey_file = open(self.path, 'w')
publickey_file.write(crypto.dump_publickey(crypto.FILETYPE_PEM, privatekey))
publickey_file.close()
except (IOError, OSError):
e = get_exception()
raise PublicKeyError(e)

@@ -236,4 +236,4 @@ from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()

@@ -194,10 +194,10 @@ def main():
cursor = conn.cursor()
except Exception as e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your @sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False

@@ -223,7 +223,7 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
if not db_exists(cursor, db):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and

@@ -139,7 +139,7 @@ def update_roles(role_facts, cursor, role,
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
return False
if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
return False
return True

@@ -184,12 +184,12 @@ def update_roles(schema_facts, cursor, schema,
cursor.execute("create role {0}".format(role))
cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
for role in set(create_required) - set(create_existing):
cursor.execute("grant create on schema {0} to {1}".format(schema, role))
def check(schema_facts, schema, usage_roles, create_roles, owner):
schema_key = schema.lower()
if schema_key not in schema_facts:
return False
if owner and owner.lower() == schema_facts[schema_key]['owner'].lower():
return False
if cmp(sorted(usage_roles), sorted(schema_facts[schema_key]['usage_roles'])) != 0:

@@ -195,7 +195,7 @@ def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:

@@ -231,16 +231,16 @@ def contentfilter(fsname, pattern):
return True
try:
f = open(fsname)
prog = re.compile(pattern)
for line in f:
if prog.match (line):
f.close()
return True
f.close()
except:
pass
return False
@@ -337,7 +337,7 @@ def main():
fsname=os.path.normpath(os.path.join(root, fsobj))
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
try:
st = os.lstat(fsname)

@@ -134,17 +134,17 @@ from ansible.module_utils.basic import AnsibleModule
# match_opt
def match_opt(option, line):
option = re.escape(option)
return re.match(' *%s( |\t)*=' % option, line) \
or re.match('# *%s( |\t)*=' % option, line) \
or re.match('; *%s( |\t)*=' % option, line)
# ==============================================================
# match_active_opt
def match_active_opt(option, line):
option = re.escape(option)
return re.match(' *%s( |\t)*=' % option, line)
# ==============================================================
# do_ini

@@ -443,10 +443,10 @@ def main():
ssh_opts = '-S none'
if not verify_host:
ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts
if ssh_args:
ssh_opts = '%s %s' % (ssh_opts, ssh_args)
if source.startswith('"rsync://') and dest.startswith('"rsync://'):
module.fail_json(msg='either src or dest must be a localhost', rc=1)

@@ -591,7 +591,7 @@ class TgzArchive(object):
self.opts = module.params['extra_opts']
self.module = module
if self.module.check_mode:
self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]
# Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
self.cmd_path = self.module.get_bin_path('gtar', None)

@@ -307,7 +307,7 @@ def delete_monitor(module):
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None
@@ -327,7 +327,7 @@ def mute_monitor(module):
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:

@@ -144,21 +144,21 @@ def post_annotation(module):
def main():
    module = AnsibleModule(
        argument_spec = dict(
            user = dict(required=True),
            api_key = dict(required=True),
            name = dict(required=False),
            title = dict(required=True),
            source = dict(required=False),
            description = dict(required=False),
            start_time = dict(required=False, default=None, type='int'),
            end_time = dict(require=False, default=None, type='int'),
            links = dict(type='list')
        )
    )
    post_annotation(module)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

@@ -567,8 +567,8 @@ def selector(module):
    to take given the right parameters"""
    if module.params["target"] == "host":
        target = Host(module.params, module)
        target.site_facts()
    elif module.params["target"] == "hostgroup":
        # Validate target specific required parameters
        if module.params["fullpath"] is not None:

@@ -286,29 +286,29 @@ def sensu_check(module, path, name, state='present', backup=False):
                reasons.append('`{opt}\' was removed'.format(opt=opt))
        if module.params['custom']:
            # Convert to json
            custom_params = module.params['custom']
            overwrited_fields = set(custom_params.keys()) & set(simple_opts + ['type','subdue','subdue_begin','subdue_end'])
            if overwrited_fields:
                msg = 'You can\'t overwriting standard module parameters via "custom". You are trying overwrite: {opt}'.format(opt=list(overwrited_fields))
                module.fail_json(msg=msg)
            for k,v in custom_params.items():
                if k in config['checks'][name]:
                    if not config['checks'][name][k] == v:
                        changed = True
                        reasons.append('`custom param {opt}\' was changed'.format(opt=k))
                else:
                    changed = True
                    reasons.append('`custom param {opt}\' was added'.format(opt=k))
                check[k] = v
            simple_opts += custom_params.keys()
        # Remove obsolete custom params
        for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type','subdue','subdue_begin','subdue_end']):
            changed = True
            reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
            del check[opt]
        if module.params['metric']:
            if 'type' not in check or check['type'] != 'metric':

@@ -405,7 +405,7 @@ def main():
            body = json.dumps(body)
            lower_header_keys = [key.lower() for key in dict_headers]
            if 'content-type' not in lower_header_keys:
                dict_headers['Content-Type'] = 'application/json'
    # Grab all the http headers. Need this hack since passing multi-values is
    # currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')

@@ -514,10 +514,10 @@ class CloudflareAPI(object):
    def ensure_dns_record(self,**kwargs):
        params = {}
        for param in ['port','priority','proto','proxied','service','ttl','type','record','value','weight','zone']:
            if param in kwargs:
                params[param] = kwargs[param]
            else:
                params[param] = getattr(self,param)
        search_value = params['value']
        search_record = params['record']

@@ -112,10 +112,10 @@ CL_LICENSE_PATH='/usr/cumulus/bin/cl-license'
def install_license(module):
    # license is not installed, install it
    _url = module.params.get('src')
    (_rc, out, _err) = module.run_command("%s -i %s" % (CL_LICENSE_PATH, _url))
    if _rc > 0:
        module.fail_json(msg=_err)
def main():

@@ -431,8 +431,8 @@ class Interfaces(FactsBase):
        match = re.search(r'Internet address is (\S+)', data)
        if match:
            if match.group(1) != "not":
                addr, masklen = match.group(1).split('/')
                return dict(address=addr, masklen=int(masklen))
    def parse_mtu(self, data):
        match = re.search(r'MTU (\d+)', data)
@@ -463,7 +463,7 @@ class Interfaces(FactsBase):
    def parse_lineprotocol(self, data):
        match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M)
        if match:
            return match.group(1)
    def parse_operstatus(self, data):
        match = re.search(r'^(?:.+) is (.+),', data, re.M)

@@ -343,7 +343,7 @@ class ExoDnsRecord(ExoDns):
            self.result['diff']['before'] = record
            self.result['changed'] = True
            if not self.module.check_mode:
                self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "DELETE")
        return record
    def get_result(self, resource):

@@ -342,7 +342,7 @@ def main():
    if monitors:
        monitors = []
        for monitor in module.params['monitors']:
            monitors.append(fq_name(partition, monitor))
    # sanity check user supplied values
    if state == 'absent' and host is not None:

@@ -186,7 +186,7 @@ WAIT_INTERVAL=5
######################################################################
class TimeoutException(Exception):
    pass
class HAProxy(object):
    """
@@ -356,8 +356,8 @@ class HAProxy(object):
        # Report change status
        if state_before != state_after:
            self.command_results['changed'] = True
            self.module.exit_json(**self.command_results)
        else:
            self.command_results['changed'] = False
            self.module.exit_json(**self.command_results)

@@ -163,7 +163,7 @@ def main():
            if type == 'chat':
                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
            else:
                params[item] = module.params[item]
        elif type == 'inbox':
            module.fail_json(msg="%s is required for the 'inbox' type" % item)

@@ -285,7 +285,7 @@ def main():
            e = get_exception()
            module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' % (host, port, e))
        else:
            module.fail_json(rc=1, msg='Unable to Connect to %s:%s: %s' % (host, port, e))
    if (secure == 'always'):

@@ -140,14 +140,14 @@ def _is_package_installed(module, name, locallib, cpanm, version):
        os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
    cmd = "%s perl -e ' use %s" % (cmd, name)
    if version:
        cmd = "%s %s;'" % (cmd, version)
    else:
        cmd = "%s;'" % cmd
    res, stdout, stderr = module.run_command(cmd, check_rc=False)
    if res == 0:
        return True
    else:
        return False
def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
    # this code should use "%s" like everything else and just return early but not fixing all of it now.

@@ -265,11 +265,11 @@ class MavenDownloader:
        url_to_use = url
        parsed_url = urlparse(url)
        if parsed_url.scheme=='s3':
            parsed_url = urlparse(url)
            bucket_name = parsed_url.netloc
            key_name = parsed_url.path[1:]
            client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
            url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10)
        req_timeout = self.module.params.get('timeout')

@@ -72,7 +72,7 @@ def main():
    changed = current != selection
    if module.check_mode or not changed:
        module.exit_json(changed=changed, before=current, after=selection)
    module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
    module.exit_json(changed=changed, before=current, after=selection)

@@ -318,7 +318,7 @@ def do_upgrade_packages(module, full=False):
        module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
    do_upgrade_packages(module)
def full_upgrade_packages(module):
    do_upgrade_packages(module, True)

@@ -320,7 +320,7 @@ class NailGun(object):
        if len(repository) == 0:
            if 'releasever' in params:
                reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
            else:
                reposet.enable(data={'basearch': params['basearch']})

@@ -134,9 +134,9 @@ class StackiHost:
        init_csrftoken = None
        for c in cookie_a:
            if "csrftoken" in c:
                init_csrftoken = c.replace("csrftoken=", "")
                init_csrftoken = init_csrftoken.rstrip("\r\n")
                break
        # Make Header Dictionary with initial CSRF
        header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,

@@ -279,7 +279,7 @@ def set_port_disabled_permanent(zone, port, protocol):
def get_source(zone, source):
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    if source in fw_settings.getSources():
        return True
    else:
        return False
@@ -317,7 +317,7 @@ def get_interface_permanent(zone, interface):
    fw_zone, fw_settings = get_fw_zone_settings(zone)
    if interface in fw_settings.getInterfaces():
        return True
    else:
        return False

@@ -102,13 +102,13 @@ def parse_vgs(data):
    return vgs
def find_mapper_device_name(module, dm_device):
    dmsetup_cmd = module.get_bin_path('dmsetup', True)
    mapper_prefix = '/dev/mapper/'
    rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
    if rc != 0:
        module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
    mapper_device = mapper_prefix + dm_name.rstrip()
    return mapper_device
def parse_pvs(module, data):
    pvs = []

@@ -315,15 +315,15 @@ def main():
    if not '%' in size:
        # LVCREATE(8) -L --size option unit
        if size[-1].lower() in 'bskmgtpe':
            size_unit = size[-1].lower()
            size = size[0:-1]
        try:
            float(size)
            if not size[0].isdigit():
                raise ValueError()
        except ValueError:
            module.fail_json(msg="Bad size specification of '%s'" % size)
        # when no unit, megabytes by default
        if size_opt == 'l':

@@ -326,10 +326,10 @@ class OSXDefaults(object):
        # Current value matches the given value. Nothing need to be done. Arrays need extra care
        if self.type == "array" and self.current_value is not None and not self.array_add and \
                set(self.current_value) == set(self.value):
            return False
        elif self.type == "array" and self.current_value is not None and self.array_add and \
                len(list(set(self.value) - set(self.current_value))) == 0:
            return False
        elif self.current_value == self.value:
            return False

@@ -276,7 +276,7 @@ def main():
        module.fail_json(msg="Not any of the command arguments %s given" % commands)
    if(params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")
    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

@@ -1419,8 +1419,8 @@ class SunOS(User):
            cmd.append(','.join(new_groups))
        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None and info[5] != self.home:
            if self.move_home:
@@ -1563,7 +1563,7 @@ class DarwinUser(User):
            if max_uid < current_uid:
                max_uid = current_uid
            if max_system_uid < current_uid and current_uid < 500:
                max_system_uid = current_uid
        if system and (0 < max_system_uid < 499):
            return max_system_uid + 1
@@ -1923,8 +1923,8 @@ class AIX(User):
            cmd.append(','.join(groups))
        if self.comment is not None and info[4] != self.comment:
            cmd.append('-c')
            cmd.append(self.comment)
        if self.home is not None and info[5] != self.home:
            if self.move_home:

@@ -395,9 +395,9 @@ class PlayContext(Base):
        # become legacy updates -- from commandline
        if not new_info.become_pass:
            if new_info.become_method == 'sudo' and new_info.sudo_pass:
                setattr(new_info, 'become_pass', new_info.sudo_pass)
            elif new_info.become_method == 'su' and new_info.su_pass:
                setattr(new_info, 'become_pass', new_info.su_pass)
        # become legacy updates -- from inventory file (inventory overrides
        # commandline)

@@ -127,7 +127,7 @@ class PlaybookInclude(Base, Conditional, Taggable):
        '''
        if v is None:
            raise AnsibleParserError("include parameter is missing", obj=ds)
        # The include line must include at least one item, which is the filename
        # to include. Anything after that should be regarded as a parameter to the include

Some files were not shown because too many files have changed in this diff Show more