minor spelling changes

Carlos E. Garcia 2016-12-10 21:50:09 -05:00 committed by Brian Coca
parent 054a3fccf8
commit 0b8011436d
114 changed files with 152 additions and 152 deletions

View file

@@ -1782,7 +1782,7 @@ New modules and plugins.
 * rax_clb *-- manages Rackspace cloud load balancers*
 - files
 * acl *-- set or get acls on a file*
-* synchronize *-- a useful wraper around rsyncing trees of files*
+* synchronize *-- a useful wrapper around rsyncing trees of files*
 * unarchive *-- pushes and extracts tarballs*
 - system
 * blacklist *-- add or remove modules from the kernel blacklist*

View file

@@ -75,7 +75,7 @@ all_instances = False
 # By default, only EC2 instances in the 'running' state are returned. Specify
 # EC2 instance states to return as a comma-separated list. This
-# option is overriden when 'all_instances' is True.
+# option is overridden when 'all_instances' is True.
 # instance_states = pending, running, shutting-down, terminated, stopping, stopped
 # By default, only RDS instances in the 'available' state are returned. Set

View file

@@ -676,7 +676,7 @@ class Ec2Inventory(object):
 try:
 # Boto also doesn't provide wrapper classes to CacheClusters or
-# CacheNodes. Because of that wo can't make use of the get_list
+# CacheNodes. Because of that we can't make use of the get_list
 # method in the AWSQueryConnection. Let's do the work manually
 clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
@@ -710,7 +710,7 @@ class Ec2Inventory(object):
 try:
 # Boto also doesn't provide wrapper classes to ReplicationGroups
-# Because of that wo can't make use of the get_list method in the
+# Because of that we can't make use of the get_list method in the
 # AWSQueryConnection. Let's do the work manually
 replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']

View file

@@ -312,7 +312,7 @@ class GceInventory(object):
 return gce
 def parse_env_zones(self):
-'''returns a list of comma seperated zones parsed from the GCE_ZONE environment variable.
+'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
 If provided, this will be used to filter the results of the grouped_instances call'''
 import csv
 reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)

View file

@@ -14,7 +14,7 @@ disable_ssl_certificate_validation = True
 # Your Rudder API token, created in the Web interface.
 token = aaabbbccc
-# Rudder API version to use, use "latest" for lastest available
+# Rudder API version to use, use "latest" for latest available
 # version.
 version = latest
@@ -23,7 +23,7 @@ version = latest
 group_name = displayName
 # Fail if there are two groups with the same name or two hosts with the
 # same hostname in the output.
 fail_if_name_collision = True
 # We cache the results of Rudder API in a local file

View file

@@ -5,7 +5,7 @@
 # TODO:
 # * more jq examples
-# * optional folder heirarchy
+# * optional folder heriarchy
 """
 $ jq '._meta.hostvars[].config' data.json | head

View file

@@ -131,7 +131,7 @@ Purge the checkout after the playbook is run.
 *-s* 'SLEEP', *--sleep=*'SLEEP'::
-Sleep for random interval (between 0 and SLEEP number of seconds) before starting. This is a useful way ot disperse git requests.
+Sleep for random interval (between 0 and SLEEP number of seconds) before starting. This is a useful way to disperse git requests.
 *--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...''::

View file

@@ -2,7 +2,7 @@
 * searchtools.js_t
 * ~~~~~~~~~~~~~~~~
 *
-* Sphinx JavaScript utilties for the full-text search.
+* Sphinx JavaScript utilities for the full-text search.
 *
 * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
 * :license: BSD, see LICENSE for details.

View file

@@ -20,7 +20,7 @@ privilege escalation tools, which you probably already use or have configured, l
 Directives
 -----------
-These can be set from play to task level, but are overriden by connection variables as they can be host specific.
+These can be set from play to task level, but are overridden by connection variables as they can be host specific.
 become
 set to 'true'/'yes' to activate privilege escalation.

View file

@@ -57,7 +57,7 @@ Individuals with direct commit access to ansible/ansible (+core, + extras) are e
 - Be active. Committers who have no activity on the project (through merges, triage, commits, etc.) will have their permissions suspended.
 - Consider backwards compatibility (goes back to "dont break existing playbooks").
 - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
-- Discuss with other commiters, specially when you are unsure of something.
+- Discuss with other committers, specially when you are unsure of something.
 - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so. It also helps to add the version of Core against which this documentation is compatible (to avoid confusion with stable versus devel docs, for backwards compatibility, etc.).
 - Consider scope, sometimes a fix can be generalized
 - Keep it simple, then things are maintainable, debuggable and intelligible.

View file

@@ -12,11 +12,11 @@ The following is a list of module_utils files and a general description. The mod
 - api.py - Adds shared support for generic API modules.
 - asa.py - Module support utilities for managing Cisco ASA network devices.
 - azure_rm_common.py - Definitions and utilities for Microsoft Azure Resource Manager template deployments.
-- basic.py - General definitions and helper utilites for Ansible modules.
+- basic.py - General definitions and helper utilities for Ansible modules.
 - cloudstack.py - Utilities for CloudStack modules.
 - database.py - Miscellaneous helper functions for PostGRES and MySQL
-- docker_common.py - Definitions and helper utilites for modules working with Docker.
-- ec2.py - Definitions and utilites for modules working with Amazon EC2
+- docker_common.py - Definitions and helper utilities for modules working with Docker.
+- ec2.py - Definitions and utilities for modules working with Amazon EC2
 - eos.py - Helper functions for modules working with EOS networking devices.
 - f5.py - Helper functions for modules working with F5 networking devices.
 - facts.py - Helper functions for modules that return facts.
@@ -25,7 +25,7 @@ The following is a list of module_utils files and a general description. The mod
 - iosxr.py - Definitions and helper functions for modules that manage Cisco IOS-XR networking devices
 - ismount.py - Contains single helper function that fixes os.path.ismount
 - junos.py - Definitions and helper functions for modules that manage Junos networking devices
-- known_hosts.py - Utilites for working with known_hosts file
+- known_hosts.py - utilities for working with known_hosts file
 - mysql.py - Allows modules to connect to a MySQL instance
 - netcfg.py - Configuration utility functions for use by networking modules
 - netcmd.py - Defines commands and comparison operators for use in networking modules
@@ -40,7 +40,7 @@ The following is a list of module_utils files and a general description. The mod
 - service.py - Contains utilities to enable modules to work with Linux services (placeholder, not in use).
 - shell.py - Functions to allow modules to create shells and work with shell commands
 - six.py - Module utils for working with the Six python 2 and 3 compatibility library
-- splitter.py - String splitting and manipulation utilites for working with Jinja2 templates
+- splitter.py - String splitting and manipulation utilities for working with Jinja2 templates
 - urls.py - Utilities for working with http and https requests
 - vca.py - Contains utilities for modules that work with VMware vCloud Air
 - vmware.py - Contains utilities for modules that work with VMware vSphere VMs
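
Aside (not part of this commit): the basic.py entry above is the helper most modules build on. A minimal, hypothetical module sketch that assumes only the public AnsibleModule API — the 'name' option and the echoed result are placeholders, not anything taken from this diff::

    # Hypothetical example for illustration; assumes only the standard AnsibleModule API.
    from ansible.module_utils.basic import AnsibleModule


    def main():
        module = AnsibleModule(
            argument_spec=dict(
                name=dict(type='str', required=True),
            ),
            supports_check_mode=True,
        )
        # Nothing is changed on the target; the parsed option is just reported back.
        module.exit_json(changed=False, name=module.params['name'])


    if __name__ == '__main__':
        main()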

View file

@@ -763,7 +763,7 @@ Windows modules checklist
 * Look at existing modules for more examples of argument checking.
 * Results
-* The result object should allways contain an attribute called changed set to either $true or $false
+* The result object should always contain an attribute called changed set to either $true or $false
 * Create your result object like this::
 $result = New-Object psobject @{

View file

@@ -71,7 +71,7 @@ numbers, other printable symbols, and a small number of unprintable "symbols"
 (control codes).
 In Python-2, the two types for these (:class:`str` for bytes and
-:class:`unicode` for text) are often used interchangably. When dealing only
+:class:`unicode` for text) are often used interchangeably. When dealing only
 with ASCII characters, the strings can be combined, compared, and converted
 from one type to another automatically. When non-ASCII characters are
 introduced, Python starts throwing exceptions due to not knowing what encoding

View file

@@ -125,7 +125,7 @@ The following example shows how Ansible's timer plugin is implemented::
 runtime = end_time - self.start_time
 self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(runtime)))
-Note that the CALLBACK_VERSION and CALLBACK_NAME definitons are required for properly functioning plugins for Ansible >=2.0.
+Note that the CALLBACK_VERSION and CALLBACK_NAME definitions are required for properly functioning plugins for Ansible >=2.0.
 .. _developing_connection_plugins:
@@ -193,7 +193,7 @@ An example of how this lookup is called::
 - debug: msg="the value of foo.txt is {{ contents }} as seen today {{ lookup('pipe', 'date +"%Y-%m-%d"') }}"
-Errors encountered during execution should be returned by raising AnsibleError() with a message describing the error. Any strings returned by your lookup plugin implementation that could ever contain non-ASCII characters must be converted into Python's unicode type becasue the strings will be run through jinja2. To do this, you can use::
+Errors encountered during execution should be returned by raising AnsibleError() with a message describing the error. Any strings returned by your lookup plugin implementation that could ever contain non-ASCII characters must be converted into Python's unicode type because the strings will be run through jinja2. To do this, you can use::
 from ansible.module_utils._text import to_text
 result_string = to_text(result_string)
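
Aside (not part of this commit): the guidance corrected above — raise AnsibleError() on failure and pass results through to_text() before jinja2 sees them — could look like the following minimal, hypothetical lookup plugin sketch::

    # Hypothetical example for illustration; assumes the standard LookupBase plugin API.
    from ansible.errors import AnsibleError
    from ansible.module_utils._text import to_text
    from ansible.plugins.lookup import LookupBase


    class LookupModule(LookupBase):

        def run(self, terms, variables=None, **kwargs):
            results = []
            for term in terms:
                try:
                    # Return text, not bytes, so the value is safe to run through jinja2.
                    results.append(to_text(term))
                except Exception as e:
                    # Report failures by raising AnsibleError, as described above.
                    raise AnsibleError("lookup failed: %s" % to_text(e))
            return results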

View file

@@ -778,7 +778,7 @@ Windows modules checklist
 * Look at existing modules for more examples of argument checking.
 * Results
-* The result object should allways contain an attribute called changed set to either $true or $false
+* The result object should always contain an attribute called changed set to either $true or $false
 * Create your result object like this::
 $result = New-Object psobject @{

View file

@@ -164,7 +164,7 @@ This is how our inventory looks like:
 As you can see, the public IPs for our web servers and jumphost has been assigned as variable ``public_ip`` directly in the inventory.
-The configure the jumphost, web servers and database servers, we use ``group_vars``. The ``group_vars`` directory contains 4 files for configuration of the groups: cloud-vm, jumphost, webserver and db-server. The cloud-vm is there for specifing the defaults of our cloud infrastructure.
+The configure the jumphost, web servers and database servers, we use ``group_vars``. The ``group_vars`` directory contains 4 files for configuration of the groups: cloud-vm, jumphost, webserver and db-server. The cloud-vm is there for specifying the defaults of our cloud infrastructure.
 .. code-block:: yaml
@@ -231,7 +231,7 @@ Now to the fun part. We create a playbook to create our infrastructure we call i
 In the above play we defined 3 tasks and use the group ``cloud-vm`` as target to handle all VMs in the cloud but instead SSH to these VMs, we use ``connetion=local`` to execute the API calls locally from our workstation.
-In the first task, we ensure we have a running VM created with the Debian template. If the VM is already created but stopped, it would just start it. If you like to change the offering on an exisiting VM, you must add ``force: yes`` to the task, which would stop the VM, change the offering and start the VM again.
+In the first task, we ensure we have a running VM created with the Debian template. If the VM is already created but stopped, it would just start it. If you like to change the offering on an existing VM, you must add ``force: yes`` to the task, which would stop the VM, change the offering and start the VM again.
 In the second task we ensure the ports are opened if we give a public IP to the VM.

View file

@@ -90,7 +90,7 @@ You also need Python 2.4 or later. If you are running less than Python 2.5 on th
 Ansible 2.2 introduces a tech preview of support for Python 3. For more information, see `Python 3 Support <http://docs.ansible.com/ansible/python_3_support.html>`_.
-By default, Ansible uses Python 2 in order to maintain compability with older distributions
+By default, Ansible uses Python 2 in order to maintain compatibility with older distributions
 such as RHEL 5 and RHEL 6. However, some Linux distributions (Gentoo, Arch) may not have a
 Python 2.X interpreter installed by default. On those systems, you should install one, and set
 the 'ansible_python_interpreter' variable in inventory (see :doc:`intro_inventory`) to point at your 2.X Python. Distributions

View file

@@ -97,7 +97,7 @@ And if you want to read the list of hosts from a file, prefix the file name with
 Easy enough. See :doc:`intro_adhoc` and then :doc:`playbooks` for how to apply this knowledge.
-.. note:: With the exception of version 1.9, you can use ',' instead of ':' as a host list separator. The ',' is prefered specially when dealing with ranges and ipv6.
+.. note:: With the exception of version 1.9, you can use ',' instead of ':' as a host list separator. The ',' is preferred specially when dealing with ranges and ipv6.
 .. note:: As of 2.0 the ';' is deprecated as a host list separator.
 .. seealso::

View file

@@ -364,7 +364,7 @@ You may wind up with a more readable playbook by using the PowerShell equivalent
 - name: Move file on remote Windows Server from one location to another
 raw: Move-Item C:\teststuff\myfile.conf C:\builds\smtp.conf
-Bear in mind that using C(raw) will allways report "changed", and it is your responsiblity to ensure PowerShell will need to handle idempotency as appropriate (the move examples above are inherently not idempotent), so where possible use (or write) a module.
+Bear in mind that using C(raw) will always report "changed", and it is your responsiblity to ensure PowerShell will need to handle idempotency as appropriate (the move examples above are inherently not idempotent), so where possible use (or write) a module.
 Here's an example of how to use the win_stat module to test for file existence. Note that the data returned by the win_stat module is slightly different than what is provided by the Linux equivalent::

View file

@@ -328,7 +328,7 @@
 #
 #pipelining = False
-# Control the mechanism for transfering files
+# Control the mechanism for transferring files
 # * smart = try sftp and then try scp [default]
 # * True = use scp only
 # * False = use sftp only

View file

@@ -521,7 +521,7 @@ class GalaxyCLI(CLI):
 def execute_login(self):
 """
-Verify user's identify via Github and retreive an auth token from Galaxy.
+Verify user's identify via Github and retrieve an auth token from Galaxy.
 """
 # Authenticate with github and retrieve a token
 if self.options.token is None:
@@ -540,7 +540,7 @@ class GalaxyCLI(CLI):
 token = GalaxyToken()
 token.set(galaxy_response['token'])
-display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
+display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
 return 0
 def execute_import(self):

View file

@@ -72,7 +72,7 @@ def get_config(p, section, key, env_var, default, value_type=None, expand_relati
 and return it as a python list.
 :none: Sets the value to None
 :path: Expands any environment variables and tilde's in the value.
-:tmp_path: Create a unique temporary directory inside of the dirctory
+:tmp_path: Create a unique temporary directory inside of the directory
 specified by value and return its path.
 :pathlist: Treat the value as a typical PATH string. (On POSIX, this
 means colon separated strings.) Split the value and then expand

View file

@@ -413,7 +413,7 @@ class TaskExecutor:
 # loop error takes precedence
 if self._loop_eval_error is not None:
 raise self._loop_eval_error
-# skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags
+# skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
 if self._task.action not in ['include', 'include_role']:
 raise

View file

@@ -23,7 +23,7 @@ from ansible.parsing.dataloader import DataLoader
 class TaskResult:
 '''
-This class is responsible for interpretting the resulting data
+This class is responsible for interpreting the resulting data
 from an executed task, and provides helper methods for determining
 the result of a given task.
 '''

View file

@@ -152,7 +152,7 @@ class InventoryScript:
 try:
 got = self.host_vars_from_top.get(host.name, {})
 except AttributeError as e:
-raise AnsibleError("Improperly formated host information for %s: %s" % (host.name,to_native(e)))
+raise AnsibleError("Improperly formatted host information for %s: %s" % (host.name,to_native(e)))
 return got
 cmd = [self.filename, "--host", host.name]

View file

@@ -600,7 +600,7 @@ class Distribution(object):
 """
 This subclass of Facts fills the distribution, distribution_version and distribution_release variables
-To do so it checks the existance and content of typical files in /etc containing distribution information
+To do so it checks the existence and content of typical files in /etc containing distribution information
 This is unit tested. Please extend the tests to cover all distributions if you have them available.
 """

View file

@@ -77,7 +77,7 @@ notes:
 pause to delay further playbook execution until the instance is reachable,
 if necessary.
 - This module returns multiple changed statuses on disassociation or release.
-It returns an overall status based on any changes occuring. It also returns
+It returns an overall status based on any changes occurring. It also returns
 individual changed statuses for disassociation and release.
 '''

View file

@@ -122,7 +122,7 @@ def create_scaling_policy(connection, module):
 if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
 changed = True
-# set the min adjustment step incase the user decided to change their
+# set the min adjustment step in case the user decided to change their
 # adjustment type to percentage
 setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))

View file

@@ -582,7 +582,7 @@ def main():
 elif inst is not None:
 volume, changed = attach_volume(module, ec2, volume, inst)
-# Add device, volume_id and volume_type parameters separately to maintain backward compatability
+# Add device, volume_id and volume_type parameters separately to maintain backward compatibility
 volume_info = get_volume_info(volume, state)
 module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
 elif state == 'absent':

View file

@@ -960,7 +960,7 @@ def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
 changed = True
 success = True
 err_msg = (
-'NAT gateway {0} is in a deleting state. Delete was successfull'
+'NAT gateway {0} is in a deleting state. Delete was successful'
 .format(nat_gateway_id)
 )

View file

@@ -284,7 +284,7 @@ class AzureRMSubnet(AzureRMModuleBase):
 subnet)
 new_subnet = self.get_poller_result(poller)
 except Exception as exc:
-self.fail("Error creating or updateing subnet {0} - {1}".format(self.name, str(exc)))
+self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
 self.check_provisioning_state(new_subnet)
 return subnet_to_dict(new_subnet)

View file

@@ -287,7 +287,7 @@ class AnsibleCloudStackCluster(AnsibleCloudStack):
 clusters = self.cs.listClusters(**args)
 if clusters:
 self.cluster = clusters['cluster'][0]
-# fix differnt return from API then request argument given
+# fix different return from API then request argument given
 self.cluster['hypervisor'] = self.cluster['hypervisortype']
 self.cluster['clustername'] = self.cluster['name']
 return self.cluster

View file

@@ -175,7 +175,7 @@ options:
 default: null
 zone:
 description:
-- Name of the zone in which the instance shoud be deployed.
+- Name of the zone in which the instance should be deployed.
 - If not set, default zone is used.
 required: false
 default: null
@@ -621,7 +621,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
 instance = self.recover_instance(instance=instance)
 instance = self.update_instance(instance=instance, start_vm=start_vm)
-# In check mode, we do not necessarely have an instance
+# In check mode, we do not necessarily have an instance
 if instance:
 instance = self.ensure_tags(resource=instance, resource_type='UserVm')
 # refresh instance data

View file

@@ -108,7 +108,7 @@ options:
 default: null
 zone:
 description:
-- Name of the zone in which the rule shoud be created.
+- Name of the zone in which the rule should be created.
 - If not set, default zone is used.
 required: false
 default: null

View file

@@ -38,7 +38,7 @@ options:
 required: true
 architecture:
 description:
-- The archiecture for the container (e.g. "x86_64" or "i686").
+- The architecture for the container (e.g. "x86_64" or "i686").
 See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
 required: false
 config:

View file

@@ -419,7 +419,7 @@ options:
 state:
 description:
 - Indicates desired state of the instance.
-- If C(current), the current state of the VM will be fecthed. You can acces it with C(results.status)
+- If C(current), the current state of the VM will be fecthed. You can access it with C(results.status)
 choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
 required: false
 default: present

View file

@@ -667,7 +667,7 @@ class RHEVConn(object):
 setChanged()
 try:
 NIC.update()
-setMsg('iface has succesfully been updated.')
+setMsg('iface has successfully been updated.')
 except Exception as e:
 setMsg("Failed to update the iface.")
 setMsg(str(e))

View file

@@ -17,7 +17,7 @@ Naming
 detail. A good example of this are floating IPs, which can come from either
 Nova or Neutron, but which one they come from is immaterial to an end user.
 * If the module is one that a cloud admin would expect to use, it should be
-be named with the service and the resouce, such as os\_keystone\_domain.
+be named with the service and the resource, such as os\_keystone\_domain.
 * If the module is one that a cloud admin and a cloud consumer could both use,
 the cloud consumer rules apply.
@@ -53,7 +53,7 @@ Libraries
 OpenStack Client libraries. The OpenStack Client libraries do no have end
 users as a primary audience, they are for intra-server communication. The
 python-openstacksdk is the future there, and shade will migrate to it when
-its ready in a manner that is not noticable to ansible users.
+its ready in a manner that is not noticeable to ansible users.
 Testing
 -------

View file

@@ -284,7 +284,7 @@ def ensure_user_role_exists(keystone, user_name, tenant_name, role_name,
 Return (True, id) if a new role was created or if the role was newly
 assigned to the user for the tenant. (False, id) if the role already
-exists and was already assigned to the user ofr the tenant.
+exists and was already assigned to the user for the tenant.
 """
 # Check if the user has the role in the tenant

View file

@@ -153,7 +153,7 @@ def main():
 if server:
 cloud.inspect_machine(server['uuid'], module.params['wait'])
 # TODO(TheJulia): diff properties, ?and ports? and determine
-# if a change occured. In theory, the node is always changed
+# if a change occurred. In theory, the node is always changed
 # if introspection is able to update the record.
 module.exit_json(changed=True,
 ansible_facts=server['properties'])

View file

@@ -58,7 +58,7 @@ options:
 default: null
 project:
 description:
-- Name or ID of the project to scope the role assocation to.
+- Name or ID of the project to scope the role association to.
 If you are using keystone version 2, then this value is required.
 required: false
 default: null

View file

@@ -45,7 +45,7 @@ options:
 default: present
 choices: ['present', 'absent']
 description:
-- Determines wether the backend is to be created/modified
+- Determines whether the backend is to be created/modified
 or deleted
 probe:
 required: false

View file

@@ -401,7 +401,7 @@ class VmsModule(BaseModule):
 """
 oVirt in version 4.1 doesn't support search by template+version_number,
 so we need to list all templates with specific name and then iterate
-throught it's version until we find the version we look for.
+through it's version until we find the version we look for.
 """
 template = None
 if self._module.params['template']:

View file

@@ -109,12 +109,12 @@ options:
 default: 1
 subscription_user:
 description:
-- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
 required: false
 default: null
 subscription_password:
 description:
-- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+- THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
 required: false
 default: null
 wait:

View file

@@ -42,11 +42,11 @@ options:
 choices: [ "us/las", "de/fra", "de/fkb" ]
 subscription_user:
 description:
-- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
 required: false
 subscription_password:
 description:
-- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+- THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
 required: false
 wait:
 description:

View file

@@ -44,11 +44,11 @@ options:
 required: true
 subscription_user:
 description:
-- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
 required: false
 subscription_password:
 description:
-- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+- THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
 required: false
 wait:
 description:

View file

@@ -87,11 +87,11 @@ options:
 required: false
 subscription_user:
 description:
-- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
 required: false
 subscription_password:
 description:
-- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+- THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
 required: false
 wait:
 description:

View file

@@ -40,11 +40,11 @@ options:
 required: true
 subscription_user:
 description:
-- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
 required: false
 subscription_password:
 description:
-- THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+- THe ProfitBricks password. Overrides the PB_PASSWORD environment variable.
 required: false
 wait:
 description:

View file

@@ -118,7 +118,7 @@ options:
 default: null
 post_uri:
 description:
-- URL of a post provisioning script ot be loaded and exectued on virtual instance
+- URL of a post provisioning script to be loaded and executed on virtual instance
 required: false
 default: null
 state:

View file

@@ -794,7 +794,7 @@ class PyVmomiHelper(object):
 clonespec_kwargs['config'].memoryMB = \
 int(self.params['hardware']['memory_mb'])
-# lets try and assign a static ip addresss
+# lets try and assign a static ip address
 if self.params['customize'] is True:
 ip_settings = list()
 if self.params['ips']:

View file

@@ -84,7 +84,7 @@ options:
 default: None
 vm_shell_env:
 description:
-- Comma seperated list of envirnoment variable, specified in the guest OS notation
+- Comma separated list of envirnoment variable, specified in the guest OS notation
 required: False
 default: None
 vm_shell_cwd:

View file

@@ -56,7 +56,7 @@ options:
 service_name:
 description:
 - Unique name for the service on a node, must be unique per node,
-required if registering a service. May be ommitted if registering
+required if registering a service. May be omitted if registering
 a node level check
 required: false
 service_id:

View file

@@ -101,7 +101,7 @@ EXAMPLES = '''
 - key: 'private/foo'
 policy: deny
-- name: create an acl with specific token with both key and serivce rules
+- name: create an acl with specific token with both key and service rules
 consul_acl:
 mgmt_token: 'some_management_acl'
 name: 'Foo access'
@@ -186,7 +186,7 @@ def update_acl(module):
 changed = True
 except Exception as e:
 module.fail_json(
-msg="No token returned, check your managment key and that \
+msg="No token returned, check your management key and that \
 the host is in the acl datacenter %s" % e)
 except Exception as e:
 module.fail_json(msg="Could not create/update acl %s" % e)

View file

@@ -171,7 +171,7 @@ EXAMPLES = '''
 roles: readWriteAnyDatabase
 state: present
-# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is usefull for oplog access (MONGO_OPLOG_URL).
+# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
 # please notice the credentials must be added to the 'admin' database because the 'local' database is not syncronized and can't receive user credentials
 # To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
 # This syntax requires mongodb 2.6+ and pymongo 2.5+

View file

@@ -107,7 +107,7 @@ options:
 """
 EXAMPLES = r"""
-- name: insert/update "Match User" configuation block in /etc/ssh/sshd_config
+- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
 blockinfile:
 dest: /etc/ssh/sshd_config
 block: |

View file

@@ -37,7 +37,7 @@ options:
 basedir:
 description:
 - Path of a base directory in which the patch file will be applied.
-May be ommitted when C(dest) option is specified, otherwise required.
+May be omitted when C(dest) option is specified, otherwise required.
 required: false
 dest:
 description:

View file

@@ -71,7 +71,7 @@ options:
 user_certificate:
 description:
 - List of Base-64 encoded server certificates.
-- If option is ommitted certificates will not be checked or changed.
+- If option is omitted certificates will not be checked or changed.
 - If an emtpy list is passed all assigned certificates will be removed.
 - Certificates already assigned but not passed will be removed.
 required: false

View file

@@ -108,7 +108,7 @@ def unfollow_log(module, le_path, logs):
 removed_count = 0
-# Using a for loop incase of error, we can report the package that failed
+# Using a for loop in case of error, we can report the package that failed
 for log in logs:
 # Query the log first, to see if we even need to remove.
 if not query_log_status(module, le_path, log):

View file

@@ -1526,7 +1526,7 @@ class Host(LogicMonitor):
 groups,
 properties,
 alertenable):
-"""Return a property formated hash for the
+"""Return a property formatted hash for the
 creation of a host using the rpc function"""
 self.module.debug("Running Host._build_host_hash...")
@@ -2017,7 +2017,7 @@ class Hostgroup(LogicMonitor):
 description,
 properties,
 alertenable):
-"""Return a property formated hash for the
+"""Return a property formatted hash for the
 creation of a hostgroup using the rpc function"""
 self.module.debug("Running Hostgroup._build_host_hash")

View file

@@ -120,7 +120,7 @@ notes:
 so if Zabbix server's time and host's time are not synchronized,
 you will get strange results.
 - Install required module with 'pip install zabbix-api' command.
-- Checks existance only by maintenance name.
+- Checks existence only by maintenance name.
 '''
 EXAMPLES = '''
@@ -349,7 +349,7 @@ def main():
 (rc, maintenance, error) = get_maintenance_id(zbx, name)
 if rc != 0:
-module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))
 if not maintenance:
 if not host_names and not host_groups:
@@ -368,7 +368,7 @@ def main():
 (rc, maintenance, error) = get_maintenance_id(zbx, name)
 if rc != 0:
-module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error))
+module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))
 if maintenance:
 if module.check_mode:

View file

@@ -94,7 +94,7 @@ options:
 desirable to have the task get the current running-config for
 every task in a playbook. The I(config) argument allows the
 implementer to pass in the configuruation to use as the base
-config for comparision.
+config for comparison.
 required: false
 default: null
 """

View file

@@ -153,7 +153,7 @@ class BigIpDeviceNtp(object):
 r = self.api.tm.sys.ntp.load()
 if hasattr(r, 'servers'):
-# Deliberately using sets to supress duplicates
+# Deliberately using sets to suppress duplicates
 p['servers'] = set([str(x) for x in r.servers])
 if hasattr(r, 'timezone'):
 p['timezone'] = str(r.timezone)

View file

@@ -284,7 +284,7 @@ class BigIpDeviceSshd(object):
 r = self.api.tm.sys.sshd.load()
 if hasattr(r, 'allow'):
-# Deliberately using sets to supress duplicates
+# Deliberately using sets to suppress duplicates
 p['allow'] = set([str(x) for x in r.allow])
 if hasattr(r, 'banner'):
 p['banner'] = str(r.banner)

View file

@@ -193,7 +193,7 @@ class BigIpGtmDatacenter(object):
 )
 if hasattr(r, 'servers'):
-# Deliberately using sets to supress duplicates
+# Deliberately using sets to suppress duplicates
 p['servers'] = set([str(x) for x in r.servers])
 if hasattr(r, 'contact'):
 p['contact'] = str(r.contact)

View file

@@ -105,7 +105,7 @@ options:
 ratio:
 description:
 - Pool member ratio weight. Valid values range from 1 through 100.
-New pool members -- unless overriden with this value -- default
+New pool members -- unless overridden with this value -- default
 to 1.
 required: false
 default: null

View file

@@ -536,7 +536,7 @@ class BigIpSelfIp(object):
 BIG-IP, we need to massage the values that are provided by the
 user so that they include the partition.
-:return: List of vlans formatted with preceeding partition
+:return: List of vlans formatted with preceding partition
 """
 partition = self.params['partition']
 vlans = self.api.tm.net.vlans.get_collection()

View file

@@ -111,10 +111,10 @@ options:
 - Specify if the configuration receives mirrored traffic.
 pn_unknown_ucast_level:
 description:
-- Specify an unkown unicast level in percent. The default value is 100%.
+- Specify an unknown unicast level in percent. The default value is 100%.
 pn_unknown_mcast_level:
 description:
-- Specify an unkown multicast level in percent. The default value is 100%.
+- Specify an unknown multicast level in percent. The default value is 100%.
 pn_broadcast_level:
 description:
 - Specify a broadcast level in percent. The default value is 100%.

View file

@@ -56,7 +56,7 @@ options:
 required: false
 integrity:
 description:
-- Hashing algoritm, required if version is v3
+- Hashing algorithm, required if version is v3
 choices: [ 'md5', 'sha' ]
 required: false
 authkey:
@@ -65,7 +65,7 @@ options:
 required: false
 privacy:
 description:
-- Encryption algoritm, required if level is authPriv
+- Encryption algorithm, required if level is authPriv
 choices: [ 'des', 'aes' ]
 required: false
 privkey:

View file

@@ -43,7 +43,7 @@ options:
 remote_max_checkpoints:
 description:
 - The I(remote_max_checkpoints) argument configures the maximum
-number of rollback files that can be transfered and saved to
+number of rollback files that can be transferred and saved to
 a remote location. Valid values for this argument are in the
 range of 1 to 50
 required: false

View file

@@ -84,7 +84,7 @@ def wakeonlan(module, mac, broadcast, port):
 mac_orig = mac
-# Remove possible seperator from MAC address
+# Remove possible separator from MAC address
 if len(mac) == 12 + 5:
 mac = mac.replace(mac[2], '')

View file

@@ -182,7 +182,7 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj
 if color == "normal" and text is not None:
 payload = dict(text=html_escape(text))
 elif text is not None:
-# With a custom color we have to set the message as attachment, and explicitely turn markdown parsing on for it.
+# With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
 payload = dict(attachments=[dict(text=html_escape(text), color=color, mrkdwn_in=["text"])])
 if channel is not None:
 if (channel[0] == '#') or (channel[0] == '@'):

View file

@@ -149,7 +149,7 @@ def has_changed(string):
 return "Nothing to install or update" not in string
 def get_available_options(module, command='install'):
-# get all availabe options from a composer command using composer help to json
+# get all available options from a composer command using composer help to json
 rc, out, err = composer_command(module, "help %s --format=json" % command)
 if rc != 0:
 output = parse_out(err)

View file

@@ -120,7 +120,7 @@ def query_package(module, name, state="present"):
 def remove_packages(module, packages):
 remove_c = 0
-# Using a for loop incase of error, we can report the package that failed
+# Using a for loop in case of error, we can report the package that failed
 for package in packages:
 # Query the package first, to see if we even need to remove
 installed, updated = query_package(module, package)

View file

@@ -112,7 +112,7 @@ def update_package_db(module):
 def remove_packages(module, packages):
 remove_c = 0
-# Using a for loop incase of error, we can report the package that failed
+# Using a for loop in case of error, we can report the package that failed
 for package in packages:
 # Query the package first, to see if we even need to remove
 if not query_package(module, package):

View file

@@ -108,7 +108,7 @@ def remove_packages(module, port_path, packages):
 """ Uninstalls one or more packages if installed. """
 remove_c = 0
-# Using a for loop incase of error, we can report the package that failed
+# Using a for loop in case of error, we can report the package that failed
 for package in packages:
 # Query the package first, to see if we even need to remove
 if not query_package(module, port_path, package):

View file

@ -448,7 +448,7 @@ def upgrade_packages(module):
# Attempt to upgrade all packages. # Attempt to upgrade all packages.
rc, stdout, stderr = execute_command("%s" % upgrade_cmd, module) rc, stdout, stderr = execute_command("%s" % upgrade_cmd, module)
# Try to find any occurance of a package changing version like: # Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok". # "bzip2-1.0.6->1.0.6p0: ok".
match = re.search("\W\w.+->.+: ok\W", stdout) match = re.search("\W\w.+->.+: ok\W", stdout)
if match: if match:
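
The openbsd_pkg hunk decides whether the upgrade actually changed anything by searching stdout for a version transition such as bzip2-1.0.6->1.0.6p0: ok. The same check against a made-up sample string (the sample output is an illustration, not captured from a real run):

    import re

    sample_stdout = "quirks-2.54 signed on 2016-09-27T12:33:01Z\nbzip2-1.0.6->1.0.6p0: ok\n"

    # Any "<name>-<old>-><new>: ok" line means at least one package changed version.
    if re.search(r"\W\w.+->.+: ok\W", sample_stdout):
        print("at least one package was upgraded")
    else:
        print("no packages changed version")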

View file

@@ -111,7 +111,7 @@ def remove_packages(module, opkg_path, packages):
force = "--force-%s" % force force = "--force-%s" % force
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package): if not query_package(module, opkg_path, package):

View file

@@ -216,7 +216,7 @@ def remove_packages(module, pacman_path, packages):
args = "R" args = "R"
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package) installed, updated, unknown = query_package(module, pacman_path, package)

View file

@@ -249,7 +249,7 @@ def remove_packages(module, packages):
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, package): if not query_package(module, package):

View file

@@ -141,7 +141,7 @@ def pkgng_older_than(module, pkgng_path, compare_version):
def remove_packages(module, pkgng_path, packages, dir_arg): def remove_packages(module, pkgng_path, packages, dir_arg):
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg): if not query_package(module, pkgng_path, package, dir_arg):

View file

@@ -109,7 +109,7 @@ def matching_packages(module, name):
ports_glob_path = module.get_bin_path('ports_glob', True) ports_glob_path = module.get_bin_path('ports_glob', True)
rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
#counts the numer of packages found # counts the number of packages found
occurrences = out.count('\n') occurrences = out.count('\n')
if occurrences == 0: if occurrences == 0:
name_without_digits = re.sub('[0-9]', '', name) name_without_digits = re.sub('[0-9]', '', name)
@@ -130,7 +130,7 @@ def remove_packages(module, packages):
pkg_delete_path = module.get_bin_path('pkg', True) pkg_delete_path = module.get_bin_path('pkg', True)
pkg_delete_path = pkg_delete_path + " delete -y" pkg_delete_path = pkg_delete_path + " delete -y"
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, package): if not query_package(module, package):

View file

@@ -94,7 +94,7 @@ def query_package(module, slackpkg_path, name):
def remove_packages(module, slackpkg_path, packages): def remove_packages(module, slackpkg_path, packages):
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package): if not query_package(module, slackpkg_path, package):

View file

@@ -82,7 +82,7 @@ def compare_package(version1, version2):
Return values: Return values:
-1 first minor -1 first minor
0 equal 0 equal
1 fisrt greater """ 1 first greater """
def normalize(v): def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
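
The swdepot hunk normalizes versions by stripping trailing .0 components before comparing the numeric parts. A quick standalone sketch of that comparison with hypothetical version strings (the compare_package wrapper below mirrors the documented -1/0/1 contract):

    import re

    def normalize(v):
        # "1.2.0" and "1.2" normalize to the same list, so they compare as equal.
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]

    def compare_package(version1, version2):
        """Return -1 if the first version is lower, 0 if equal, 1 if it is greater."""
        a, b = normalize(version1), normalize(version2)
        return (a > b) - (a < b)

    print(compare_package("1.2.0", "1.2"))   # 0
    print(compare_package("1.10", "1.9"))    # 1
    print(compare_package("1.2", "1.2.1"))   # -1
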
@@ -178,7 +178,7 @@ def main():
rc, output = install_package(module, depot, name) rc, output = install_package(module, depot, name)
if not rc: if not rc:
msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
changed = True changed = True
else: else:

View file

@@ -124,7 +124,7 @@ def update_package_db(module):
def remove_packages(module, packages): def remove_packages(module, packages):
remove_c = 0 remove_c = 0
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
if not query_package(module, package): if not query_package(module, package):

View file

@@ -161,7 +161,7 @@ def upgrade(module, xbps_path):
def remove_packages(module, xbps_path, packages): def remove_packages(module, xbps_path, packages):
"""Returns true if package removal succeeds""" """Returns true if package removal succeeds"""
changed_packages = [] changed_packages = []
# Using a for loop incase of error, we can report the package that failed # Using a for loop in case of error, we can report the package that failed
for package in packages: for package in packages:
# Query the package first, to see if we even need to remove # Query the package first, to see if we even need to remove
installed, updated = query_package(module, xbps_path, package) installed, updated = query_package(module, xbps_path, package)

View file

@@ -80,7 +80,7 @@ options:
- whether the list of target nodes on the portal should be - whether the list of target nodes on the portal should be
(re)discovered and added to the persistent iscsi database. (re)discovered and added to the persistent iscsi database.
Keep in mind that iscsiadm discovery resets configurtion, like node.startup Keep in mind that iscsiadm discovery resets configurtion, like node.startup
to manual, hence combined with auto_node_startup=yes will allways return to manual, hence combined with auto_node_startup=yes will always return
a changed state. a changed state.
show_nodes: show_nodes:
required: false required: false

View file

@@ -127,7 +127,7 @@ def _load_dist_subclass(cls, *args, **kwargs):
class Svc(object): class Svc(object):
""" """
Main class that handles daemontools, can be subclassed and overriden in case Main class that handles daemontools, can be subclassed and overridden in case
we want to use a 'derivative' like encore, s6, etc we want to use a 'derivative' like encore, s6, etc
""" """

View file

@@ -81,7 +81,7 @@ from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
class EjabberdUserException(Exception): class EjabberdUserException(Exception):
""" Base exeption for EjabberdUser class object """ """ Base exception for EjabberdUser class object """
pass pass
class EjabberdUser(object): class EjabberdUser(object):

View file

@@ -128,7 +128,7 @@ options:
- Defines whether to install plugin dependencies. - Defines whether to install plugin dependencies.
notes: notes:
- Plugin installation shoud be run under root or the same user which owns - Plugin installation should be run under root or the same user which owns
the plugin files on the disk. Only if the plugin is not installed yet and the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires no version is specified, the API installation is performed which requires
only the Web UI credentials. only the Web UI credentials.

View file

@@ -42,14 +42,14 @@ description:
- "To use this module, it has to be executed at least twice. Either as two - "To use this module, it has to be executed at least twice. Either as two
different tasks in the same run or during multiple runs." different tasks in the same run or during multiple runs."
- "Between these two tasks you have to fulfill the required steps for the - "Between these two tasks you have to fulfill the required steps for the
choosen challenge by whatever means necessary. For http-01 that means chosen challenge by whatever means necessary. For http-01 that means
creating the necessary challenge file on the destination webserver. For creating the necessary challenge file on the destination webserver. For
dns-01 the necessary dns record has to be created. tls-sni-02 requires dns-01 the necessary dns record has to be created. tls-sni-02 requires
you to create a SSL certificate with the appropriate subjectAlternativeNames. you to create a SSL certificate with the appropriate subjectAlternativeNames.
It is I(not) the responsibility of this module to perform these steps." It is I(not) the responsibility of this module to perform these steps."
- "For details on how to fulfill these challenges, you might have to read through - "For details on how to fulfill these challenges, you might have to read through
U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)" U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)"
- "Although the defaults are choosen so that the module can be used with - "Although the defaults are chosen so that the module can be used with
the Let's Encrypt CA, the module can be used with any service using the ACME the Let's Encrypt CA, the module can be used with any service using the ACME
protocol." protocol."
requirements: requirements:
@@ -293,7 +293,7 @@ class ACMEDirectory(object):
class ACMEAccount(object): class ACMEAccount(object):
''' '''
ACME account object. Handles the authorized communication with the ACME account object. Handles the authorized communication with the
ACME server. Provides access to accound bound information like ACME server. Provides access to account bound information like
the currently active authorizations and valid certificates the currently active authorizations and valid certificates
''' '''
def __init__(self,module): def __init__(self,module):
@@ -607,7 +607,7 @@ class ACMEClient(object):
keyauthorization = self.account.get_keyauthorization(token) keyauthorization = self.account.get_keyauthorization(token)
# NOTE: tls-sni-01 is not supported by choice # NOTE: tls-sni-01 is not supported by choice
# too complex to be usefull and tls-sni-02 is an alternative # too complex to be useful and tls-sni-02 is an alternative
# as soon as it is implemented server side # as soon as it is implemented server side
if type == 'http-01': if type == 'http-01':
# https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2 # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2
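
For the http-01 branch above, the ACME draft expects the key authorization string to be served from a well-known path derived from the challenge token. A rough sketch of how that string is built (the JWK and token below are placeholders; the real module derives the thumbprint from the configured account key):

    import base64
    import hashlib
    import json

    def b64url(data):
        # ACME uses unpadded base64url encoding throughout.
        return base64.urlsafe_b64encode(data).decode("ascii").rstrip("=")

    def key_authorization(token, account_jwk):
        # Key authorization = token || '.' || base64url(SHA-256 thumbprint of the account key).
        canonical = json.dumps(account_jwk, sort_keys=True, separators=(",", ":"))
        thumbprint = b64url(hashlib.sha256(canonical.encode("utf8")).digest())
        return "%s.%s" % (token, thumbprint)

    # Placeholder JWK and token, purely for illustration.
    jwk = {"e": "AQAB", "kty": "RSA", "n": "placeholder-modulus"}
    token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA"

    # For http-01 this value has to be reachable at
    #   http://<domain>/.well-known/acme-challenge/<token>
    print(key_authorization(token, jwk))
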
@@ -637,7 +637,7 @@ class ACMEClient(object):
def _validate_challenges(self,auth): def _validate_challenges(self,auth):
''' '''
Validate the authorization provided in the auth dict. Returns True Validate the authorization provided in the auth dict. Returns True
when the validation was successfull and False when it was not. when the validation was successful and False when it was not.
''' '''
for challenge in auth['challenges']: for challenge in auth['challenges']:
if self.challenge != challenge['type']: if self.challenge != challenge['type']:
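
The _validate_challenges hunk above walks the challenges of an authorization and only looks at the chosen type. A rough sketch of the shape of that check (the dict layout follows the ACME draft; the real module also polls the server and treats an 'invalid' status as a hard failure):

    def challenge_is_validated(auth, chosen_type):
        # Only the challenge matching the chosen type matters for this authorization.
        for challenge in auth.get("challenges", []):
            if challenge.get("type") != chosen_type:
                continue
            # The ACME server marks a successfully verified challenge as 'valid'.
            return challenge.get("status") == "valid"
        return False

    auth = {"challenges": [{"type": "http-01", "status": "valid"},
                           {"type": "dns-01", "status": "pending"}]}
    print(challenge_is_validated(auth, "http-01"))  # True
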
@@ -716,7 +716,7 @@ class ACMEClient(object):
def do_challenges(self): def do_challenges(self):
''' '''
Create new authorizations for all domains of the CSR and return Create new authorizations for all domains of the CSR and return
the challenge details for the choosen challenge type. the challenge details for the chosen challenge type.
''' '''
data = {} data = {}
for domain in self.domains: for domain in self.domains:

View file

@@ -175,7 +175,7 @@ Try {
Set-Attr $result "changed" $true; Set-Attr $result "changed" $true;
} }
Catch { Catch {
Fail-Json $result "an exception occured when adding the specified rule" Fail-Json $result "an exception occurred when adding the specified rule"
} }
} }
ElseIf ($state -eq "absent" -And $match -eq $true) { ElseIf ($state -eq "absent" -And $match -eq $true) {
@@ -185,7 +185,7 @@ Try {
Set-Attr $result "changed" $true; Set-Attr $result "changed" $true;
} }
Catch { Catch {
Fail-Json $result "an exception occured when removing the specified rule" Fail-Json $result "an exception occurred when removing the specified rule"
} }
} }
Else { Else {
@@ -200,7 +200,7 @@ Try {
} }
} }
Catch { Catch {
Fail-Json $result "an error occured when attempting to $state $rights permission(s) on $path for $user" Fail-Json $result "an error occurred when attempting to $state $rights permission(s) on $path for $user"
} }
Exit-Json $result Exit-Json $result

View file

@@ -80,7 +80,7 @@ Try {
} }
} }
Catch { Catch {
Fail-Json $result "an error occured when attempting to disable inheritance" Fail-Json $result "an error occurred when attempting to disable inheritance"
} }
Exit-Json $result Exit-Json $result

View file

@@ -48,7 +48,7 @@ options:
required: true required: true
direction: direction:
description: description:
- is this rule for inbound or outbound trafic - is this rule for inbound or outbound traffic
default: null default: null
required: true required: true
choices: ['in', 'out'] choices: ['in', 'out']

View file

@@ -50,7 +50,7 @@ options:
aliases: [] aliases: []
attributes: attributes:
description: description:
- Application Pool attributes from string where attributes are seperated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2" - Application Pool attributes from string where attributes are separated by a pipe and attribute name/values by colon Ex. "foo:1|bar:2"
required: false required: false
default: null default: null
aliases: [] aliases: []
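
The win_iis hunks here and in the site hunks below describe attribute strings where pairs are separated by a pipe and each name/value by a colon, e.g. "foo:1|bar:2". A tiny parsing sketch of that format, written in Python purely to illustrate the shape (the actual modules do this in PowerShell):

    def parse_attributes(spec):
        """Parse a 'name:value|name:value' string into a dict."""
        attributes = {}
        if not spec:
            return attributes
        for pair in spec.split("|"):
            # Split on the first colon only, so values may themselves contain colons.
            name, _, value = pair.partition(":")
            attributes[name.strip()] = value.strip()
        return attributes

    print(parse_attributes("foo:1|bar:2"))  # {'foo': '1', 'bar': '2'}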

View file

@@ -88,7 +88,7 @@ try {
if (-not $curent_bindings -and $state -eq 'present') { if (-not $curent_bindings -and $state -eq 'present') {
New-WebBinding @binding_parameters -Force New-WebBinding @binding_parameters -Force
# Select certificat # Select certificate
if($certificateHash -ne $FALSE) { if($certificateHash -ne $FALSE) {
$ip = $binding_parameters["IPAddress"] $ip = $binding_parameters["IPAddress"]

View file

@@ -49,7 +49,7 @@ $bind_hostname = Get-Attr $params "hostname" $FALSE;
$bind_ssl = Get-Attr $params "ssl" $FALSE; $bind_ssl = Get-Attr $params "ssl" $FALSE;
# Custom site Parameters from string where properties # Custom site Parameters from string where properties
# are seperated by a pipe and property name/values by colon. # are separated by a pipe and property name/values by colon.
# Ex. "foo:1|bar:2" # Ex. "foo:1|bar:2"
$parameters = Get-Attr $params "parameters" $null; $parameters = Get-Attr $params "parameters" $null;
if($parameters -ne $null) { if($parameters -ne $null) {

View file

@@ -91,7 +91,7 @@ options:
aliases: [] aliases: []
parameters: parameters:
description: description:
- Custom site Parameters from string where properties are seperated by a pipe and property name/values by colon Ex. "foo:1|bar:2" - Custom site Parameters from string where properties are separated by a pipe and property name/values by colon Ex. "foo:1|bar:2"
required: false required: false
default: null default: null
aliases: [] aliases: []

View file

@@ -116,7 +116,7 @@ function BackupFile($path) {
function Present($dest, $regexp, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep) { function Present($dest, $regexp, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep) {
# Note that we have to clean up the dest path because ansible wants to treat / and \ as # Note that we have to clean up the dest path because ansible wants to treat / and \ as
# interchangable in windows pathnames, but .NET framework internals do not support that. # interchangeable in windows pathnames, but .NET framework internals do not support that.
$cleandest = $dest.Replace("/", "\"); $cleandest = $dest.Replace("/", "\");
# Check if destination exists. If it does not exist, either create it if create == "yes" # Check if destination exists. If it does not exist, either create it if create == "yes"

View file

@@ -130,7 +130,7 @@ Try {
} }
} }
Catch { Catch {
Fail-Json $result "an error occured when attempting to change owner on $path for $user" Fail-Json $result "an error occurred when attempting to change owner on $path for $user"
} }
Exit-Json $result Exit-Json $result

View file

@@ -144,9 +144,9 @@ Function Get-RegistryValueIgnoreError
} }
} }
catch catch
{ {
$exceptionText = ($_ | Out-String).Trim() $exceptionText = ($_ | Out-String).Trim()
Write-Verbose "Exception occured in Get-RegistryValueIgnoreError: $exceptionText" Write-Verbose "Exception occurred in Get-RegistryValueIgnoreError: $exceptionText"
} }
return $null return $null
} }

Some files were not shown because too many files have changed in this diff