Misc typo fixes (#49816)
Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>
This commit is contained in:
parent 7609a8cdd6 · commit 013c42b14f
24 changed files with 37 additions and 37 deletions
@@ -199,7 +199,7 @@ class CLI(with_metaclass(ABCMeta, object)):
 for password_file in vault_password_files:
     id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)

-    # note this makes --vault-id higher precendence than --vault-password-file
+    # note this makes --vault-id higher precedence than --vault-password-file
     # if we want to intertwingle them in order probably need a cli callback to populate vault_ids
     # used by --vault-id and --vault-password-file
     vault_ids.append(id_slug)
@@ -267,7 +267,7 @@ class CLI(with_metaclass(ABCMeta, object)):
         vault_id=built_vault_id)

 # a empty or invalid password from the prompt will warn and continue to the next
-# without erroring globablly
+# without erroring globally
 try:
     prompted_vault_secret.load()
 except AnsibleError as exc:
@@ -97,7 +97,7 @@ class ConfigCLI(CLI):

 # pylint: disable=unreachable
 if self.options.setting is None:
-    raise AnsibleOptionsError("update option requries a setting to update")
+    raise AnsibleOptionsError("update option requires a setting to update")

 (entry, value) = self.options.setting.split('=')
 if '.' in entry:
@@ -461,7 +461,7 @@ DEFAULT_BECOME_FLAGS:
     - {key: become_flags, section: privilege_escalation}
 DEFAULT_BECOME_USER:
   # FIXME: should really be blank and make -u passing optional depending on it
-  name: Set the user you 'become' via privlege escalation
+  name: Set the user you 'become' via privilege escalation
   default: root
   description: The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
   env: [{name: ANSIBLE_BECOME_USER}]
@@ -431,11 +431,11 @@ class ConfigManager(object):
 else:
     value = defs[config].get('default')
     origin = 'default'
-    # skip typing as this is a temlated default that will be resolved later in constants, which has needed vars
+    # skip typing as this is a templated default that will be resolved later in constants, which has needed vars
     if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')):
         return value, origin

-# ensure correct type, can raise exceptoins on mismatched types
+# ensure correct type, can raise exceptions on mismatched types
 try:
     value = ensure_type(value, defs[config].get('type'), origin=origin)
 except ValueError as e:
@@ -341,7 +341,7 @@ instances:
       type: string
       sample: "2099-12-31T15:59Z"
     gpu:
-      description: The attribution of instane GPU.
+      description: The attribution of instance GPU.
       returned: always
       type: complex
       contains:
@@ -170,7 +170,7 @@ instances:
      type: string
      sample: "2099-12-31T15:59Z"
    gpu:
-     description: The attribution of instane GPU.
+     description: The attribution of instance GPU.
      returned: always
      type: complex
      contains:
@@ -270,7 +270,7 @@ def configure_api(client, api_data=None, api_id=None, mode="overwrite"):

 @AWSRetry.backoff(**retry_params)
 def create_deployment(client, api_id=None, stage=None, description=None):
-    # we can also get None as an argument so we don't do this as a defult
+    # we can also get None as an argument so we don't do this as a default
    return client.create_deployment(restApiId=api_id, stageName=stage, description=description)

@@ -651,7 +651,7 @@ custom_error_responses:
     type: complex
     contains:
       error_caching_min_ttl:
-        description: Mininum time to cache this error response
+        description: Minimum time to cache this error response
         returned: always
         type: int
         sample: 300
@@ -36,7 +36,7 @@ EXAMPLES = '''

 RETURN = '''
 log_groups:
-    description: Return the list of complex objetcs representing log groups
+    description: Return the list of complex objects representing log groups
     returned: success
     type: complex
     contains:
@@ -74,7 +74,7 @@ options:
         type: str
     origin_path:
         description:
-            - A directory path on the origin that CDN can use to retreive content from.
+            - A directory path on the origin that CDN can use to retrieve content from.
         type: str
     content_types_to_compress:
         description:
@@ -35,7 +35,7 @@ options:
             - It can be a virtual machine, OS disk blob URI, managed OS disk, or OS snapshot.
             - Each type of source except for blob URI can be given as resource id, name or a dict contains C(resource_group), C(name) and C(types).
             - If source type is blob URI, the source should be the full URI of the blob in string type.
-            - If you specify the C(type) in a dict, acceptable value constains C(disks), C(virtual_machines) and C(snapshots).
+            - If you specify the C(type) in a dict, acceptable value contains C(disks), C(virtual_machines) and C(snapshots).
         type: raw
         required: true
     data_disk_sources:
@@ -237,7 +237,7 @@ class AzureRMImage(AzureRMModuleBase):
 elif isinstance(source, str):
     tokenize = parse_resource_id(source)
 else:
-    self.fail("source parameter sould be in type string or dictionary")
+    self.fail("source parameter should be in type string or dictionary")
 if tokenize.get('type') == 'disks':
     disk = format_resource_id(tokenize['name'],
                               tokenize.get('subscription_id') or self.subscription_id,
@@ -408,7 +408,7 @@ def nic_to_dict(nic):
         internal_fqdn=nic.dns_settings.internal_fqdn
     ),
     ip_configurations=ip_configurations,
-    ip_configuration=ip_configurations[0] if len(ip_configurations) == 1 else None,  # for compatiable issue, keep this field
+    ip_configuration=ip_configurations[0] if len(ip_configurations) == 1 else None,  # for compatible issue, keep this field
     mac_address=nic.mac_address,
     enable_ip_forwarding=nic.enable_ip_forwarding,
     provisioning_state=nic.provisioning_state,
@@ -512,7 +512,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
 self.security_group = self.parse_resource_to_dict(self.security_group or self.name)

 if self.state == 'present' and not self.ip_configurations:
-    # construct the ip_configurations array for compatiable
+    # construct the ip_configurations array for compatible
     self.deprecate('Setting ip_configuration flatten is deprecated and will be removed.'
                    ' Using ip_configurations list to define the ip configuration', version='2.9')
     self.ip_configurations = [
@@ -112,7 +112,7 @@ options:
             - Dict of tenant settings.
     state:
         description:
-            - Assert the state of the redis cahce.
+            - Assert the state of the redis cache.
             - Use 'present' to create or update a redis cache and 'absent' to delete it.
         default: present
         choices:
@@ -57,7 +57,7 @@ EXAMPLES = '''
       resource_group: TestRG
       profile_name: Testing

-- name: Get specific endpoint of a Traffic Manager profie
+- name: Get specific endpoint of a Traffic Manager profile
   azure_rm_trafficmanager_facts:
     resource_group: TestRG
     profile_name: Testing
@@ -33,7 +33,7 @@ options:
             - Limit results by resource group.
     return_publish_profile:
         description:
-            - Indicate wheather to return publishing profile of the web app.
+            - Indicate whether to return publishing profile of the web app.
         default: False
         type: bool
     tags:
@@ -127,14 +127,14 @@ webapps:
             description: Outbound ip address of the web app.
             type: str
         ftp_publish_url:
-            description: Publishing url of the web app when depeloyment type is FTP.
+            description: Publishing url of the web app when deployment type is FTP.
             type: str
             sample: ftp://xxxx.ftp.azurewebsites.windows.net
         state:
             description: State of the web app. eg. running.
             type: str
         publishing_username:
-            description: Publishing profle user name.
+            description: Publishing profile user name.
             returned: only when I(return_publish_profile) is True.
             type: str
         publishing_password:
@@ -363,7 +363,7 @@ def main():

 target_state = module.params['state']
 server = AnsibleCloudscaleServer(module)
-# The server could be in a changeing or error state.
+# The server could be in a changing or error state.
 # Wait for one of the allowed states before doing anything.
 # If an allowed state can't be reached, this module fails.
 if server.info['state'] not in ALLOWED_STATES:
@@ -162,7 +162,7 @@ def get_global_forwarding_rule(client, name, project_id=None):
     """
     Get a Global Forwarding Rule from GCP.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param name: Name of the Global Forwarding Rule.
@@ -186,7 +186,7 @@ def create_global_forwarding_rule(client, params, project_id):
     """
     Create a new Global Forwarding Rule.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param params: Dictionary of arguments from AnsibleModule.
@@ -239,7 +239,7 @@ def update_global_forwarding_rule(client, forwarding_rule, params, name, project

     If the forwarding_rule has not changed, the update will not occur.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param forwarding_rule: Name of the Target Proxy.
@@ -244,7 +244,7 @@ def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
     """
     Get a Healthcheck from GCP.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param name: Name of the Url Map.
@@ -269,7 +269,7 @@ def create_healthcheck(client, params, project_id, resource_type='HTTP'):
     """
     Create a new Healthcheck.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param params: Dictionary of arguments from AnsibleModule.
@@ -297,7 +297,7 @@ def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
     """
     Delete a Healthcheck.

-    :param client: An initialized GCE Compute Disover resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param name: Name of the Url Map.
@@ -326,7 +326,7 @@ def update_healthcheck(client, healthcheck, params, name, project_id,

     If the healthcheck has not changed, the update will not occur.

-    :param client: An initialized GCE Compute Disovery resource.
+    :param client: An initialized GCE Compute Discovery resource.
     :type client: :class: `googleapiclient.discovery.Resource`

     :param healthcheck: Name of the Url Map.
@@ -34,7 +34,7 @@ options:
         required: yes
     subscription:
         description:
-            - Dictionary containing a subscripton name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+            - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
              For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields.
              See subfields name, push_endpoint and ack_deadline for more information.
     name:
@@ -27,7 +27,7 @@ options:
       - Filter facts
     choices: [ status, result ]
 notes:
-    - See http://cloudinit.readthedocs.io/ for more information abount cloud-init.
+    - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
 '''

 EXAMPLES = '''
@@ -33,7 +33,7 @@ description:
   - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm
     will generate an error, normally crashing the current ansible task. This is normally the right
     thing since ignoring a value that IAM isn't giving access to could cause bigger problems and
-    wrong behavour or loss of data. If you want to continue in this case then you will have to set
+    wrong behaviour or loss of data. If you want to continue in this case then you will have to set
     up two ansible tasks, one which sets a variable and ignores failures one which uses the value
     of that variable with a default. See the examples below.

@@ -144,7 +144,7 @@ class LookupModule(LookupBase):
             e.g. ['parameter_name', 'parameter_name_too' ]
         :kwarg variables: ansible variables active at the time of the lookup
         :kwarg aws_secret_key: identity of the AWS key to use
-        :kwarg aws_access_key: AWS seret key (matching identity)
+        :kwarg aws_access_key: AWS secret key (matching identity)
         :kwarg aws_security_token: AWS session key if using STS
         :kwarg decrypt: Set to True to get decrypted parameters
         :kwarg region: AWS region in which to do the lookup
@@ -202,7 +202,7 @@ class StrategyModule(StrategyBase):
     moving on to the next task
     '''

-    # iteratate over each task, while there is one left to run
+    # iterate over each task, while there is one left to run
     result = self._tqm.RUN_OK
     work_to_do = True
     while work_to_do and not self._tqm._terminated:
@@ -202,7 +202,7 @@ class VariableManager:
     basedirs = [task.get_search_path()[0]]
 elif C.PLAYBOOK_VARS_ROOT != 'top':
     # preserves default basedirs, only option pre 2.3
-    raise AnsibleError('Unkown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
+    raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)

 # if we have a task in this context, and that task has a role, make
 # sure it sees its defaults above any other roles, as we previously
@@ -96,7 +96,7 @@ class TestErrors(unittest.TestCase):
 def test__load_module_source_no_duplicate_names(self):
     '''
     This test simulates importing 2 plugins with the same name,
-    and validating that the import is shortcirtuited if a file with the same name
+    and validating that the import is short circuited if a file with the same name
     has already been imported
     '''

@@ -104,7 +104,7 @@ class TestErrors(unittest.TestCase):

     pl = PluginLoader('test', '', 'test', 'test_plugin')
     one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py'))
-    # This line wouldn't even succeed if we didn't short cirtuit on finding a duplicate name
+    # This line wouldn't even succeed if we didn't short circuit on finding a duplicate name
     two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py')

     self.assertEqual(one, two)