PEP 8 whitespace cleanup. (#20783)

* PEP 8 E271 whitespace cleanup.
* PEP 8 W293 whitespace cleanup.
* Fix whitespace issue from recent PR.
Authored by Matt Clay on 2017-01-27 15:45:23 -08:00, committed by GitHub
parent 802fbcadf8
commit 95789f3949
132 changed files with 287 additions and 313 deletions
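
For reference, pycodestyle reports E271 for multiple spaces after a keyword and W293 for a visually blank line that still contains whitespace. A minimal illustration of the two rules (not code taken from this diff):

# E271: multiple spaces after a keyword
flag = True and  False     # violation: two spaces after 'and'
flag = True and False      # fixed

# W293: a blank line that still contains spaces or tabs;
# the fix is to strip the trailing whitespace so the line is truly empty.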


@ -142,7 +142,7 @@ def generate_inv_from_api(enterprise_entity,config):
break
else:
vm_nic = None
vm_state = True
# From abiquo.ini: Only adding to inventory VMs deployed
if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')):


@ -145,7 +145,7 @@ class LibcloudInventory(object):
self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self):
'''
@ -225,7 +225,7 @@ class LibcloudInventory(object):
# Inventory: Group by key pair
if node.extra['key_name']:
self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
if node.extra['security_group']:
self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)


@ -289,7 +289,7 @@ class ConsulInventory(object):
and the node name add each entry in the dictionary to the the node's
metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
@ -305,7 +305,7 @@ class ConsulInventory(object):
kv_groups config value and the node name add the node address to each
group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:


@ -77,7 +77,7 @@ def get_a_ssh_config(box_name):
if options.list:
ssh_config = get_ssh_config()
hosts = { 'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])


@ -18,7 +18,7 @@ def initialize():
except AttributeError:
#FreeIPA < 4.0 compatibility
api.Backend.xmlclient.connect()
return api
def list_groups(api):
@ -39,7 +39,7 @@ def list_groups(api):
if 'member_host' in hostgroup:
members = [host for host in hostgroup['member_host']]
if 'memberindirect_host' in hostgroup:
members += (host for host in hostgroup['memberindirect_host'])
inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]}
for member in members:
@ -48,7 +48,7 @@ def list_groups(api):
inventory['_meta'] = {'hostvars': hostvars}
inv_string = json.dumps(inventory, indent=1, sort_keys=True)
print(inv_string)
return None
def parse_args():


@ -220,7 +220,7 @@ elif options.host:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
print('Host: %s' % options.host)
for k, v in iteritems(host_details):


@ -58,7 +58,7 @@ try:
import requests
except:
sys.exit('requests package is required for this inventory script')
CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']


@ -3,10 +3,10 @@
import optparse
from jinja2 import Environment, FileSystemLoader
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
template_file = 'playbooks_directives.rst.j2'
oblist = {}


@ -59,7 +59,7 @@ class AggregateStats:
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what


@ -44,12 +44,12 @@ class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
if github_token == None:
self.get_credentials()
@ -61,7 +61,7 @@ class GalaxyLogin(object):
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token",'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = raw_input("Github Username: ")
except:


@ -208,7 +208,7 @@ class GalaxyRole(object):
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src


@ -42,7 +42,7 @@ class GalaxyToken(object):
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
@ -57,11 +57,11 @@ class GalaxyToken(object):
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file,'w') as f:
yaml.safe_dump(self.config,f,default_flow_style=False)


@ -333,7 +333,7 @@ class AzureRMModuleBase(object):
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: module parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
@ -345,11 +345,11 @@ class AzureRMModuleBase(object):
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:


@ -368,7 +368,7 @@ class Facts(object):
self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1)
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd'
@ -2155,7 +2155,7 @@ class AIX(Hardware):
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)):
pv_info = { 'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),


@ -127,7 +127,7 @@ class Rhsm(RegistrationBase):
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
self.module.run_command(args, check_rc=True)
@property


@ -644,7 +644,7 @@ def replace(connection, module):
instances = props['instances']
if replace_instances:
instances = replace_instances
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
@ -674,7 +674,7 @@ def replace(connection, module):
if not old_instances:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size.
@ -827,7 +827,7 @@ def wait_for_term_inst(connection, module, term_instances):
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)


@ -148,7 +148,7 @@ class Ec2CustomerGatewayManager:
CustomerGatewayId=gw_id
)
return response
def ensure_cgw_present(self, bgp_asn, ip_address):
response = self.ec2.create_customer_gateway(
DryRun=False,


@ -215,7 +215,7 @@ class ElbInformation(object):
elb_array.append(existing_lb)
else:
elb_array = all_elbs
return list(map(self._get_elb_info, elb_array))
def main():


@ -143,14 +143,14 @@ def main():
resource = module.params.get('resource')
tags = module.params.get('tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
# We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
filters = {'resource-id' : resource}
gettags = ec2.get_all_tags(filters=filters)
dictadd = {}
dictremove = {}
baddict = {}
@ -170,7 +170,7 @@ def main():
tagger = ec2.create_tags(resource, dictadd)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
if state == 'absent':
if not tags:
module.fail_json(msg="tags argument is required when state is absent")


@ -97,7 +97,7 @@ def get_volume_info(volume):
},
'tags': volume.tags
}
return volume_info
def list_ec2_volumes(connection, module):


@ -289,7 +289,7 @@ def main():
changed = False
new_options = collections.defaultdict(lambda: None)
region, ec2_url, boto_params = get_aws_connection_info(module)
connection = connect_to_aws(boto.vpc, region, **boto_params)
@ -378,9 +378,9 @@ def main():
# and remove old ones if that was requested
if params['delete_old'] and existing_options:
remove_dhcp_options_by_id(connection, existing_options.id)
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *


@ -72,7 +72,7 @@ def get_vpc_info(vpc):
classic_link = vpc.classic_link_enabled
except AttributeError:
classic_link = False
vpc_info = { 'id': vpc.id,
'instance_tenancy': vpc.instance_tenancy,
'classic_link_enabled': classic_link,


@ -133,7 +133,7 @@ def list_virtual_gateways(client, module):
snaked_vgws = [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
for vgw in all_virtual_gateways['VpnGateways']]
module.exit_json(virtual_gateways=snaked_vgws)


@ -457,7 +457,7 @@ def delete_group(module=None, iam=None, name=None):
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"


@ -280,7 +280,7 @@ EXAMPLES = '''
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
@ -298,7 +298,7 @@ EXAMPLES = '''
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
@ -844,7 +844,7 @@ def promote_db_instance(module, conn):
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)


@ -130,7 +130,7 @@ def main():
except BotoServerError as e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_db_subnet_group(group_name)


@ -470,7 +470,7 @@ def main():
if command_in == 'create':
if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None:
module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.")


@ -57,7 +57,7 @@ options:
- "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
required: false
default: index.html
extends_documentation_fragment:
- aws
- ec2
@ -76,14 +76,14 @@ EXAMPLES = '''
- s3_website:
name: mybucket.com
state: absent
# Configure an s3 bucket as a website with index and error pages
- s3_website:
name: mybucket.com
suffix: home.htm
error_key: errors/404.htm
state: present
'''
RETURN = '''
@ -170,7 +170,7 @@ def _create_website_configuration(suffix, error_key, redirect_all_requests):
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
bucket_name = module.params.get("name")
redirect_all_requests = module.params.get("redirect_all_requests")
# If redirect_all_requests is set then don't use the default suffix that has been set
@ -263,7 +263,7 @@ def main():
redirect_all_requests=dict(type='str', required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
@ -273,7 +273,7 @@ def main():
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
@ -288,7 +288,7 @@ def main():
enable_or_update_bucket_as_website(client_connection, resource_connection, module)
elif state == 'absent':
disable_bucket_as_website(client_connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *


@ -187,7 +187,7 @@ def create_or_update_sqs_queue(connection, module):
queue = connection.create_queue(queue_name)
update_sqs_queue(queue, **queue_attributes)
result['changed'] = True
if not module.check_mode:
result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn']
result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout']
@ -195,7 +195,7 @@ def create_or_update_sqs_queue(connection, module):
result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize']
result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds']
result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds']
except BotoServerError:
result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
module.fail_json(**result)


@ -365,7 +365,7 @@ def create_virtual_machine(module, azure):
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
@ -563,7 +563,7 @@ def main():
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):


@ -372,7 +372,7 @@ class AzureRMStorageBlob(AzureRMModuleBase):
self.log('Create container %s' % self.container)
tags = None
if not self.blob and self.tags:
# when a blob is present, then tags are assigned at the blob level
tags = self.tags


@ -918,7 +918,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
interface_dict['properties'] = nic_dict['properties']
# Expand public IPs to include config properties
for interface in result['properties']['networkProfile']['networkInterfaces']:
for config in interface['properties']['ipConfigurations']:
if config['properties'].get('publicIPAddress'):
pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])


@ -347,7 +347,7 @@ class AzureRMVirtualNetwork(AzureRMModuleBase):
try:
poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
new_vnet = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
return virtual_network_to_dict(new_vnet)


@ -232,7 +232,7 @@ class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
if not instance:
self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
self.facts['cloudstack_instance'] = self.get_result(instance)
return self.facts
def get_result(self, instance):


@ -293,7 +293,7 @@ class ImageManager(DockerBaseClass):
if repo_tag:
self.name = repo
self.tag = repo_tag
if self.state in ['present', 'build']:
self.present()
elif self.state == 'absent':


@ -639,7 +639,7 @@ class ContainerManager(DockerBaseClass):
return options
def cmd_up(self):
start_deps = self.dependencies
service_names = self.services
detached = True
@ -943,7 +943,7 @@ class ContainerManager(DockerBaseClass):
short_id=container.short_id
))
result['actions'].append(service_res)
if not self.check_mode and result['changed']:
_, fd_name = tempfile.mkstemp(prefix="ansible")
try:


@ -165,7 +165,7 @@ def grant_check(module, gs, obj):
module.fail_json(msg= str(e))
return True
def key_check(module, gs, bucket, obj):
try:
@ -228,7 +228,7 @@ def delete_key(module, gs, bucket, obj):
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error as e:
module.fail_json(msg= str(e))
def create_dirkey(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
@ -325,7 +325,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
else:
upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc:
create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration)
@ -333,7 +333,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
@ -350,7 +350,7 @@ def handle_delete(module, gs, bucket, obj):
module.exit_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
@ -417,7 +417,7 @@ def main():
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
module.fail_json(msg="Target bucket/key cannot be found", failed=True)
@ -425,7 +425,7 @@ def main():
download_gsfile(module, gs, bucket, obj, dest)
else:
handle_get(module, gs, bucket, obj, overwrite, dest)
if mode == 'put':
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist", failed=True)
@ -434,10 +434,10 @@ def main():
# Support for deleting an object if we have both params.
if mode == 'delete':
handle_delete(module, gs, bucket, obj)
if mode == 'create':
handle_create(module, gs, bucket, obj)
if mode == 'get_url':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):


@ -93,7 +93,7 @@ EXAMPLES = '''
# Basic zone creation example.
- name: Create a basic zone with the minimum number of parameters.
gcdns_zone: zone=example.com
# Zone removal example.
- name: Remove a zone.
gcdns_zone: zone=example.com state=absent


@ -103,7 +103,7 @@ EXAMPLES = '''
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image


@ -194,7 +194,7 @@ def publish_messages(message_list, topic):
attrs = message['attributes']
batch.publish(bytes(msg), **attrs)
return True
def pull_messages(pull_params, sub):
"""
:rtype: tuple (output, changed)
@ -203,7 +203,7 @@ def pull_messages(pull_params, sub):
max_messages=pull_params.get('max_messages', None)
message_ack = pull_params.get('message_ack', 'no')
return_immediately = pull_params.get('return_immediately', False)
output= []
pulled = sub.pull(return_immediately=return_immediately,
max_messages=max_messages)
@ -237,7 +237,7 @@ def main():
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")


@ -98,7 +98,7 @@ except ImportError as e:
def list_func(data, member='name'):
"""Used for state=list."""
return [getattr(x, member) for x in data]
def main():
module = AnsibleModule(argument_spec=dict(
@ -112,7 +112,7 @@ def main():
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_GOOGLE_CLOUD_PUBSUB:
module.fail_json(msg="Please install google-cloud-pubsub library.")


@ -247,7 +247,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
# - need linode_id (entity)
# - need disk_id for linode_id - create disk from distrib
# - need config_id for linode_id - create config (need kernel)
# Any create step triggers a job that need to be waited for.
if not servers:
for arg in (name, plan, distribution, datacenter):
@ -424,7 +424,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino
instance['status'] = 'Restarting'
changed = True
instances.append(instance)
elif state in ('absent', 'deleted'):
for server in servers:
instance = getInstanceDetails(api, server)


@ -306,7 +306,7 @@ def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork,
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try:
conn.vms.add(vmparams)
except:
@ -502,7 +502,7 @@ def main():
else:
vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart':
if vm_status(c, vmname) == 'up':
vm_restart(c, vmname)


@ -1038,7 +1038,7 @@ def main():
time.sleep(1)
except Exception as e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'current':
status = {}
try:


@ -421,7 +421,7 @@ class VirtNetwork(object):
def create(self, entryid):
return self.conn.create(entryid)
def modify(self, entryid, xml):
return self.conn.modify(entryid, xml)


@ -180,7 +180,7 @@ def _add_gateway_router(neutron, module, router_id, network_id):
module.fail_json(msg = "Error in adding gateway to router: %s" % e.message)
return True
def _remove_gateway_router(neutron, module, router_id):
try:
neutron.remove_gateway_router(router_id)
except Exception as e:


@ -193,7 +193,7 @@ def _get_port_id(neutron, module, router_id, subnet_id):
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
for port in ports['ports']:
for subnet in port['fixed_ips']:
if subnet['subnet_id'] == subnet_id:
return port['id']
@ -209,7 +209,7 @@ def _add_interface_router(neutron, module, router_id, subnet_id):
module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True
def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}


@ -205,7 +205,7 @@ def main():
else:
if masters is None:
masters = []
pre_update_zone = zone
changed = _system_state_change(state, email,
description, ttl,


@ -346,7 +346,7 @@ def get_hostname_list(module):
_msg = ("If you set count>1, you should only specify one hostname "
"with the %d formatter, not a list of hostnames.")
raise Exception(_msg)
if (len(hostnames) == 1) and (count > 0):
hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count)
@ -382,7 +382,7 @@ def get_device_id_list(module):
raise Exception("You specified too many devices, max is %d" %
MAX_DEVICES)
return device_ids
def create_single_device(module, packet_conn, hostname):
@ -430,7 +430,7 @@ def wait_for_ips(module, packet_conn, created_devices):
if all_have_public_ip(refreshed):
return refreshed
time.sleep(5)
raise Exception("Waiting for IP assignment timed out. Hostnames: %s"
% [d.hostname for d in created_devices])


@ -204,7 +204,7 @@ def act_on_sshkeys(target_state, module, packet_conn):
new_key_response = packet_conn.create_ssh_key(
newkey['label'], newkey['key'])
changed = True
matching_sshkeys.append(new_key_response)
else:
# state is 'absent' => delete mathcing keys


@ -101,9 +101,9 @@ def rax_facts(module, address, name, server_id):
servers.append(cs.servers.get(server_id))
except Exception as e:
pass
servers[:] = [server for server in servers if server.status != "DELETED"]
if len(servers) > 1:
module.fail_json(msg='Multiple servers found matching provided '
'search parameters')


@ -93,7 +93,7 @@ def find_vswitch_by_name(host, vswitch_name):
class VMwareHostVirtualSwitch(object):
def __init__(self, module):
self.host_system = None
self.content = None
@ -132,7 +132,7 @@ class VMwareHostVirtualSwitch(object):
# Source from
# https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py
def state_create_vswitch(self):
vss_spec = vim.host.VirtualSwitch.Specification()
vss_spec.numPorts = self.number_of_ports
@ -146,7 +146,7 @@ class VMwareHostVirtualSwitch(object):
def state_destroy_vswitch(self):
config = vim.host.NetworkConfig()
for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup:
if portgroup.spec.vswitchName == self.vss.name:
portgroup_config = vim.host.PortGroup.Config()
@ -158,7 +158,7 @@ class VMwareHostVirtualSwitch(object):
portgroup_config.spec.vswitchName = portgroup.spec.vswitchName
portgroup_config.spec.policy = vim.host.NetworkPolicy()
config.portgroup.append(portgroup_config)
self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
self.module.exit_json(changed=True)
@ -170,15 +170,15 @@ class VMwareHostVirtualSwitch(object):
host = get_all_objs(self.content, [vim.HostSystem])
if not host:
self.module.fail_json(msg="Unable to find host")
self.host_system = host.keys()[0]
self.vss = find_vswitch_by_name(self.host_system, self.switch_name)
if self.vss is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()


@ -999,7 +999,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
except (KeyError, ValueError):
vsphere_client.disconnect()
module.fail_json(msg="Error in '%s' definition. Size needs to be specified as an integer." % disk)
# Make sure the new disk size is higher than the current value
dev = dev_list[disk_num]
if disksize < int(dev.capacityInKB):


@ -148,7 +148,7 @@ def main():
existing_app = app_map.get(app_name)
result = {}
# Here's where the real stuff happens
if app_state == 'present':


@ -84,7 +84,7 @@ options:
EXAMPLES = '''
# This will also create a default DB user with the same
# name as the database, and the specified password.
- name: Create a database
webfaction_db:
name: "{{webfaction_user}}_db1"
@ -145,7 +145,7 @@ def main():
existing_user = user_map.get(db_name)
result = {}
# Here's where the real stuff happens
if db_state == 'present':
@ -175,16 +175,16 @@ def main():
# If this isn't a dry run...
if not module.check_mode:
if not (existing_db or existing_user):
module.exit_json(changed = False,)
if existing_db:
# Delete the db if it exists
result.update(
webfaction.delete_db(session_id, db_name, db_type)
)
if existing_user:
# Delete the default db user if it exists
result.update(


@ -121,7 +121,7 @@ def main():
existing_domain = domain_map.get(domain_name)
result = {}
# Here's where the real stuff happens
if domain_state == 'present':


@ -107,7 +107,7 @@ def main():
existing_mailbox = mailbox_name in mailbox_list
result = {}
# Here's where the real stuff happens
if site_state == 'present':


@ -53,7 +53,7 @@ options:
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
@ -141,7 +141,7 @@ def main():
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':


@ -395,7 +395,7 @@ def parse_service(module):
module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?")
class ConsulService():
def __init__(self, service_id=None, name=None, address=None, port=-1,
tags=None, loaded=None):


@ -195,7 +195,7 @@ class PrivateKey(object):
}
return result
def main():


@ -163,7 +163,7 @@ class PublicKey(object):
}
return result
def main():


@ -161,7 +161,7 @@ def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, pr
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
@ -177,7 +177,7 @@ def remove_plugin(module, plugin_bin, plugin_name):
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def main():


@ -181,7 +181,7 @@ def install_plugin(module, plugin_bin, plugin_name, url, timeout):
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
@ -191,12 +191,12 @@ def remove_plugin(module, plugin_bin, plugin_name):
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def main():


@ -322,7 +322,7 @@ def main():
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
try:
old_value = r.config_get(name)[name]
except Exception:


@ -179,7 +179,7 @@ def main():
ring_size = stats['ring_creation_size']
rc, out, err = module.run_command([riak_bin, 'version'] )
version = out.strip()
result = dict(node_name=node_name,
nodes=nodes,
ring_size=ring_size,


@ -181,7 +181,7 @@ def main():
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_querystring = login_host
if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port)


@ -178,7 +178,7 @@ def main():
else:
if state == "absent":
changed = ext_delete(cursor, ext)
elif state == "present":
changed = ext_create(cursor, ext)
except NotSupportedError:


@ -253,21 +253,21 @@ def main():
if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode:
changed = True
else:
changed = lang_altertrust(cursor, lang, trust)
else:
if module.check_mode:
changed = True
else:
changed = lang_add(cursor, lang, trust)
if force_trust:
changed = lang_altertrust(cursor, lang, trust)
else:
if lang_exists(cursor, lang):
if module.check_mode:
changed = True
kw['lang_dropped'] = True
else:


@ -688,7 +688,7 @@ def main():
module.fail_json(msg=str(e))
else:
if user_exists(cursor, user):
if module.check_mode:
changed = True
kw['user_removed'] = True
else:


@ -116,7 +116,7 @@ def get_configuration_facts(cursor, parameter_name=''):
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():


@ -135,7 +135,7 @@ def update_roles(role_facts, cursor, role,
cursor.execute("revoke {0} from {1}".format(assigned_role, role))
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:


@ -182,7 +182,7 @@ def main():
# patch need an absolute file name
p.src = os.path.abspath(p.src)
changed = False
if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
try:


@ -121,12 +121,12 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None):
elif action == "certificates":
return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
elif action == "tags":
return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
else:


@ -123,7 +123,7 @@ def main():
params["application_id"] = module.params["application_id"]
else:
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
if module.params[item]:
params[item] = module.params[item]


@ -209,7 +209,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu
'Content-Type' : 'application/json',
}
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': service}}
if requester_id:
request_data['requester_id'] = requester_id
else:
@ -235,7 +235,7 @@ def absent(module, name, user, passwd, token, requester_id, service):
'Content-Type' : 'application/json',
}
request_data = {}
if requester_id:
request_data['requester_id'] = requester_id
else:


@ -376,10 +376,10 @@ class Host(object):
if host['proxy_hostid'] != proxy_id:
return True
if host['name'] != visible_name:
return True
return False
# link or clear template of the host


@ -123,7 +123,7 @@ def chain(module):
if state in ('absent') and not config_present:
module.exit_json(changed=False)
if state in ('present'):
response = rest.put('chain[name="%s"]' % name, data={'name': name})
if response.status_code == 204:


@ -391,7 +391,7 @@ class CloudflareAPI(object):
error_msg += "; Failed to parse API response: {0}".format(content)
# received an error status but no data with details on what failed
if (info['status'] not in [200,304]) and (result is None):
self.module.fail_json(msg=error_msg)
if not result['success']:


@ -183,14 +183,14 @@ class Default(FactsBase):
return sw_name.text
else:
return ""
def parse_version(self, data):
sw_ver = data.find('./data/system-sw-state/sw-version/sw-version')
if sw_ver is not None:
return sw_ver.text
else:
return ""
def parse_hostname(self, data):
match = re.search(r'hostname\s+(\S+)', data, re.M)
if match:
@ -224,7 +224,7 @@ class Hardware(FactsBase):
self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data)
data = self.runner.get_command('show processes memory | grep Total')
match = self.parse_memory(data)
if match:
self.facts['memtotal_mb'] = int(match[0]) / 1024
@ -236,7 +236,7 @@ class Hardware(FactsBase):
return cpu_arch.text
else:
return ""
def parse_memory(self, data):
return re.findall(r'\:\s*(\d+)', data, re.M)
@ -372,7 +372,7 @@ class Interfaces(FactsBase):
lldp_facts[name].append(fact)
return lldp_facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,


@ -327,7 +327,7 @@ class Interfaces(FactsBase):
return match.group(3)
if flag==1:
return "null"
def parse_type(self, key, properties):
type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
flag=1


@ -31,25 +31,25 @@ options:
- Account API Key.
required: true
default: null
account_secret:
description:
- Account Secret Key.
required: true
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution.
required: true
default: null
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument.
required: false
default: null
record_type:
description:
- Record type.
@ -63,20 +63,20 @@ options:
- "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)"
required: false
default: null
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
required: false
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
@ -89,7 +89,7 @@ options:
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
@ -102,7 +102,7 @@ EXAMPLES = '''
domain: my.com
state: present
register: response
# create / ensure the presence of a record
- dnsmadeeasy:
account_key: key
@ -130,7 +130,7 @@ EXAMPLES = '''
state: present
record_name: test
register: response
# delete a record / ensure it is absent
- dnsmadeeasy:
account_key: key


@ -283,7 +283,7 @@ def map_config_to_obj(module):
'state': parse_state(out)
}
def map_params_to_obj(module):
obj = {
'http': module.params['http'],
'http_port': module.params['http_port'],
@ -310,7 +310,7 @@ def collect_facts(module, result):
for each in out[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts


@ -38,7 +38,7 @@ EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}"
@ -73,7 +73,7 @@ def gather_lldp():
current_dict = current_dict[path_component]
current_dict[final] = value
return output_dict
def main():
module = AnsibleModule({})
@ -84,7 +84,7 @@ def main():
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed. is lldpd running?")
# import module snippets
from ansible.module_utils.basic import *


@ -492,7 +492,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
server_type = module.params['server_type']
global_key = module.params['global_key']
encrypt_type = module.params['encrypt_type']


@ -428,7 +428,7 @@ def _match_dict(match_list, key_map):
def get_aaa_host_info(module, server_type, address):
aaa_host_info = {}
command = 'show run | inc {0}-server.host.{1}'.format(server_type, address)
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
@ -574,7 +574,7 @@ def main():
results['updates'] = cmds
results['changed'] = changed
results['end_state'] = end_state
module.exit_json(**results)


@ -612,7 +612,7 @@ def get_custom_string_value(config, arg, module):
elif arg.startswith('dampening'):
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(
PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
if arg == 'dampen_igp_metric' or arg == 'dampening_routemap':
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')


@ -295,7 +295,7 @@ def main():
supports_check_mode=True)
splitted_ssm_range = module.params['ssm_range'].split('.')
if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none':
module.fail_json(msg="Valid ssm_range values are multicast addresses "
"or the keyword 'none'.")


@ -495,10 +495,10 @@ def get_interface_mode(interface, intf_type, module):
def get_pim_interface(module, interface):
pim_interface = {}
command = 'show ip pim interface {0}'.format(interface)
body = execute_show_command(command, module,
command_type='cli_show_ascii', text=True)
if body:
if 'not running' not in body[0]:
body = execute_show_command(command, module)
@ -552,7 +552,7 @@ def get_pim_interface(module, interface):
return {}
command = 'show run interface {0}'.format(interface)
body = execute_show_command(command, module, command_type='cli_show_ascii')
jp_configs = []


@ -378,7 +378,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
location = module.params['location']
state = module.params['state']


@ -463,7 +463,7 @@ def main():
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
group = module.params['group'].lower()
state = module.params['state']


@ -97,7 +97,7 @@ def wakeonlan(module, mac, broadcast, port):
int(mac, 16)
except ValueError:
module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
# Create payload for magic packet
data = ''
padding = ''.join(['FFFFFFFFFFFF', mac * 20])


@ -142,7 +142,7 @@ def main():
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params


@ -782,7 +782,7 @@ def get_cache(module):
else:
module.fail_json(msg=str(e))
return cache
def main():
module = AnsibleModule(


@ -110,7 +110,7 @@ def update_package_db(module):
module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
@ -122,7 +122,7 @@ def remove_packages(module, packages):
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
@ -162,7 +162,7 @@ def main():
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
@ -182,6 +182,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()


@ -158,7 +158,7 @@ def install_overlay(module, name, list_url=None):
if layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would add layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)
@ -195,7 +195,7 @@ def uninstall_overlay(module, name):
if not layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would remove layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)


@ -144,18 +144,18 @@ def main():
saturl = module.params['url']
user = module.params['user']
password = module.params['password']
#initialize connection
client = xmlrpclib.Server(saturl, verbose=0)
session = client.auth.login(user, password)
# get systemid
sys_id = get_systemid(client, session, systname)
# get channels for system
chans = base_channels(client, session, sys_id)
if state == 'present':
if channelname in chans:
module.exit_json(changed=False, msg="Channel %s already exists" % channelname)


@ -119,10 +119,10 @@ def update_package_db(module):
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db")
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
@ -135,7 +135,7 @@ def remove_packages(module, packages):
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
@ -189,7 +189,7 @@ def main():
force = dict(default=True, type='bool'),
no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(URPMI_PATH):
module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH))
@ -212,6 +212,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()


@ -390,12 +390,12 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
except Exception:
e = get_exception()
module.fail_json(msg="Failure talking to yum: %s" % e)
return [ po_to_nevra(p) for p in pkgs ]
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
@ -442,7 +442,7 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r
for pkg in pkgs:
if pkg in updates:
retpkgs.append(pkg)
return set([ po_to_nevra(p) for p in retpkgs ])
else:
@ -455,12 +455,12 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
if rc == 0:
return set([ p for p in out.split('\n') if p.strip() ])
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
return set()
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, installroot='/'):
@ -725,7 +725,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, i
if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
found = True
res['results'].append('package providing %s is already installed' % (spec))
if found:
continue


@ -373,7 +373,7 @@ def package_present(m, name, want_latest):
def package_update_all(m):
"run update or patch on all available packages"
retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
if m.params['type'] == 'patch':
cmdname = 'patch'


@ -142,7 +142,7 @@ EXAMPLES = '''
- zypper_repository:
repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
auto_import_keys: yes
# Force refresh of a repository
- zypper_repository:
repo: 'http://my_internal_ci_repo/repo

Some files were not shown because too many files have changed in this diff.