Merge branch 'retf_py' of git://github.com/berendt/ansible into devel

Conflicts:
	lib/ansible/inventory/vars_plugins/group_vars.py
	plugins/callbacks/mail.py
Michael DeHaan 2014-08-08 12:17:37 -04:00
commit 8c4161d4a1
21 changed files with 48 additions and 48 deletions


@@ -42,7 +42,7 @@ if constants.ANSIBLE_FORCE_COLOR:
# --- begin "pretty"
#
# pretty - A miniature library that provides a Python print and stdout
-# wrapper that makes colored terminal text easier to use (eg. without
+# wrapper that makes colored terminal text easier to use (e.g. without
# having to mess around with ANSI escape sequences). This code is public
# domain - there is no license except that you must leave this header.
#


@@ -86,7 +86,7 @@ class Inventory(object):
else:
if ":" in x:
tokens = x.rsplit(":", 1)
-# if there is ':' in the address, then this is a ipv6
+# if there is ':' in the address, then this is an ipv6
if ':' in tokens[0]:
all.add_host(Host(x))
else:
@@ -237,7 +237,7 @@ class Inventory(object):
def __get_hosts(self, pattern):
"""
-finds hosts that postively match a particular pattern. Does not
+finds hosts that positively match a particular pattern. Does not
take into account negative matches.
"""


@@ -28,7 +28,7 @@ formatting hint when the range is expanded. e.g. [001:010] is to be
expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
-end must be the same as that of beg, else a exception is raised.
+end must be the same as that of beg, else an exception is raised.
'''
import string
@@ -37,7 +37,7 @@ from ansible import errors
def detect_range(line = None):
'''
A helper function that checks a given host line to see if it contains
-a range pattern descibed in the docstring above.
+a range pattern described in the docstring above.
Returnes True if the given line contains a pattern, else False.
'''
@@ -102,7 +102,7 @@ def expand_hostname_range(line = None):
if i_beg > i_end:
raise errors.AnsibleError("host range format incorrectly specified!")
seq = string.ascii_letters[i_beg:i_end+1]
-except ValueError: # not a alpha range
+except ValueError: # not an alpha range
seq = range(int(beg), int(end)+1, int(step))
for rseq in seq:


@@ -647,7 +647,7 @@ class AnsibleModule(object):
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
self.fail_json(msg="internal error: required and default are mutally exclusive for %s" % k)
self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
@@ -1124,7 +1124,7 @@ class AnsibleModule(object):
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(src, dest)
except (IOError,OSError), e:
-# only try workarounds for errno 18 (cross device), 1 (not permited) and 13 (permission denied)
+# only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied)
if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES:
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
@@ -1256,7 +1256,7 @@ class AnsibleModule(object):
try:
os.chdir(cwd)
except (OSError, IOError), e:
-self.fail_json(rc=e.errno, msg="Could not open %s , %s" % (cwd, str(e)))
+self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
try:
cmd = subprocess.Popen(args, **kwargs)


@@ -76,7 +76,7 @@ class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
-service facts (eg. ssh keys or selinux), and distribution facts.
+service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
@@ -1020,7 +1020,7 @@ class FreeBSDHardware(Hardware):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
-self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
+self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
@@ -1147,7 +1147,7 @@ class NetBSDHardware(Hardware):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
-self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
+self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
@@ -1552,7 +1552,7 @@ class LinuxNetwork(Network):
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = open(path).read() == '1'
-# Check whether a interface is in promiscuous mode
+# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.


@@ -241,7 +241,7 @@ class RhsmPools(object):
def _load_product_list(self):
"""
-Loads list of all availaible pools for system in data structure
+Loads list of all available pools for system in data structure
"""
args = "subscription-manager list --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
@@ -250,7 +250,7 @@
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
-# An empty line implies the end of a output group
+# An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse


@@ -84,7 +84,7 @@ class PlayBook(object):
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
-forks: desired level of paralellism
+forks: desired level of parallelism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
@@ -93,7 +93,7 @@ class PlayBook(object):
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
callbacks output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
-stats: holds aggregrate data about events occuring to each host
+stats: holds aggregrate data about events occurring to each host
sudo: if not specified per play, requests all plays use sudo mode
inventory: can be specified instead of host_list to use a pre-existing inventory object
check: don't change anything, just try to detect some potential changes
@@ -240,7 +240,7 @@ class PlayBook(object):
utils.plugins.push_basedir(basedir)
for play in playbook_data:
if type(play) != dict:
raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), recieved: %s" % play)
raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
if 'include' in play:
# a playbook (list of plays) decided to include some other list of plays
@@ -456,7 +456,7 @@ class PlayBook(object):
# template ignore_errors
cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
-task.ignore_errors = utils.check_conditional(cond , play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
+task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
# load up an appropriate ansible runner to run the task in parallel
results = self._run_task_internal(task)
@@ -594,7 +594,7 @@ class PlayBook(object):
def generate_retry_inventory(self, replay_hosts):
'''
-called by /usr/bin/ansible when a playbook run fails. It generates a inventory
+called by /usr/bin/ansible when a playbook run fails. It generates an inventory
that allows re-running on ONLY the failed hosts. This may duplicate some
variable information in group_vars/host_vars but that is ok, and expected.
'''
@@ -697,7 +697,7 @@ class PlayBook(object):
if task.any_errors_fatal and len(host_list) < hosts_count:
play.max_fail_pct = 0
-# If threshold for max nodes failed is exceeded , bail out.
+# If threshold for max nodes failed is exceeded, bail out.
if play.serial > 0:
# if serial is set, we need to shorten the size of host_count
play_count = len(play._play_hosts)


@@ -366,7 +366,7 @@ class Play(object):
roles = self._build_role_dependencies(roles, [], self.vars)
-# give each role a uuid
+# give each role an uuid
for idx, val in enumerate(roles):
this_uuid = str(uuid.uuid4())
roles[idx][-2]['role_uuid'] = this_uuid
@@ -745,7 +745,7 @@ class Play(object):
else:
role_tags[this_role] += task['vars']['tags']
-# apply each role's tags to it's tasks
+# apply each role's tags to its tasks
for idx, val in enumerate(self._tasks):
if getattr(val, 'role_name', None) is not None:
this_role = val.role_name + "-" + val.module_vars['role_uuid']


@@ -259,7 +259,7 @@ class Task(object):
if len(incompatibles) > 1:
raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
-# make first_available_file accessable to Runner code
+# make first_available_file accessible to Runner code
if self.first_available_file:
self.module_vars['first_available_file'] = self.first_available_file
# make sure that the 'item' variable is set when using


@@ -324,7 +324,7 @@ class Runner(object):
this_host = delegate['host']
-# get the vars for the delegate by it's name
+# get the vars for the delegate by its name
try:
this_info = delegate['inject']['hostvars'][this_host]
except:
@@ -348,7 +348,7 @@ class Runner(object):
delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass)
# Last chance to get private_key_file from global variables.
-# this is usefull if delegated host is not defined in the inventory
+# this is useful if delegated host is not defined in the inventory
if delegate['private_key_file'] is None:
delegate['private_key_file'] = remote_inject.get(
'ansible_ssh_private_key_file', None)
@@ -364,7 +364,7 @@ class Runner(object):
def _compute_delegate_user(self, host, inject):
""" Caculate the remote user based on an order of preference """
""" Calculate the remote user based on an order of preference """
# inventory > playbook > original_host
@@ -829,7 +829,7 @@ class Runner(object):
if actual_transport == 'accelerate':
# for accelerate, we stuff both ports into a single
# variable so that we don't have to mangle other function
-# calls just to accomodate this one case
+# calls just to accommodate this one case
actual_port = [actual_port, self.accelerate_port]
elif actual_port is not None:
actual_port = int(template.template(self.basedir, actual_port, inject))


@@ -112,7 +112,7 @@ class ActionModule(object):
conn.put_file(source, tmp_src)
sudoable = True
-# set file permissions, more permisive when the copy is done as a different user
+# set file permissions, more permissive when the copy is done as a different user
if ((self.runner.sudo and self.runner.sudo_user != 'root') or
(self.runner.su and self.runner.su_user != 'root')):
chmod_mode = 'a+rx'


@@ -96,7 +96,7 @@ try:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (ie. yum update gmp)."
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
@@ -771,7 +771,7 @@ def _gitinfo():
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
-# There is a posibility the .git file to have an absolute path.
+# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:


@@ -195,7 +195,7 @@ class J2Template(jinja2.environment.Template):
This class prevents Jinja2 from running _jinja2_vars through dict()
Without this, {% include %} and similar will create new contexts unlike
the special one created in template_from_file. This ensures they are all
-alike, with the exception of potential locals.
+alike, except for potential locals.
'''
def new_context(self, vars=None, shared=False, locals=None):
return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
@@ -362,7 +362,7 @@ def template_from_string(basedir, data, vars, fail_on_undefined=False):
"Make sure your variable name does not contain invalid characters like '-'."
)
else:
raise errors.AnsibleError("an unexpected type error occured. Error was %s" % te)
raise errors.AnsibleError("an unexpected type error occurred. Error was %s" % te)
return res
except (jinja2.exceptions.UndefinedError, errors.AnsibleUndefinedVariable):
if fail_on_undefined:


@@ -70,10 +70,10 @@ class CallbackModule(object):
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Unreachable: %s' % res.strip('\r\n').split('\n')[-1]
-body = 'An error occured for host ' + host + ' with the following message:\n\n' + res
+body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Unreachable: %s' % res['msg'].strip('\r\n').split('\n')[0]
-body = 'An error occured for host ' + host + ' with the following message:\n\n' + \
+body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
@@ -81,9 +81,9 @@ class CallbackModule(object):
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Async failure: %s' % res.strip('\r\n').split('\n')[-1]
-body = 'An error occured for host ' + host + ' with the following message:\n\n' + res
+body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Async failure: %s' % res['msg'].strip('\r\n').split('\n')[0]
-body = 'An error occured for host ' + host + ' with the following message:\n\n' + \
+body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)


@@ -157,7 +157,7 @@ class DigitalOceanInventory(object):
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
-self.index = {} # Varous indices of Droplet metadata
+self.index = {} # Various indices of Droplet metadata
# Define defaults
self.cache_path = '.'
@@ -368,7 +368,7 @@ or environment variables (DO_CLIENT_ID and DO_API_KEY)'''
def load_droplet_variables_for_host(self):
-'''Generate a JSON reponse to a --host call'''
+'''Generate a JSON response to a --host call'''
host = self.to_safe(str(self.args.host))
if not host in self.index['host_to_droplet']:


@@ -25,7 +25,7 @@
# inventory information.
#
# This plugin does not support targeting of specific hosts using the --host
-# flag. Instead, it it queries the Docker API for each container, running
+# flag. Instead, it queries the Docker API for each container, running
# or not, and returns this data all once.
#
# The plugin returns the following custom attributes on Docker containers:
@@ -196,7 +196,7 @@ def setup():
write_stderr(e)
sys.exit(1)
-# Enviroment Variables
+# Environment Variables
env_base_url = os.environ.get('DOCKER_HOST')
env_version = os.environ.get('DOCKER_VERSION')
env_timeout = os.environ.get('DOCKER_TIMEOUT')


@@ -34,4 +34,4 @@ if len(sys.argv) == 2 and sys.argv[1] == '--list':
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'jail'})
else:
print "Need a argument, either --list or --host <host>"
print "Need an argument, either --list or --host <host>"


@@ -34,4 +34,4 @@ if len(sys.argv) == 2 and sys.argv[1] == '--list':
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'lxc'})
else:
print "Need a argument, either --list or --host <host>"
print "Need an argument, either --list or --host <host>"


@@ -23,7 +23,7 @@ inventory: openshift
short_description: Openshift gears external inventory script
description:
- Generates inventory of Openshift gears using the REST interface
-- this permit to reuse playbook to setup a Openshift gear
+- this permit to reuse playbook to setup an Openshift gear
version_added: None
author: Michael Scherer
'''
@@ -113,4 +113,4 @@ if len(sys.argv) == 2 and sys.argv[1] == '--list':
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({})
else:
print "Need a argument, either --list or --host <host>"
print "Need an argument, either --list or --host <host>"


@@ -8,11 +8,11 @@ shamelessly copied from existing inventory scripts.
This script and it's ini can be used more than once,
-i.e vmware.py/vmware_colo.ini vmware_idf.py/vmware_idf.ini
+i.e. vmware.py/vmware_colo.ini vmware_idf.py/vmware_idf.ini
(script can be link)
so if you don't have clustered vcenter but multiple esx machines or
-just diff clusters you can have a inventory per each and automatically
+just diff clusters you can have an inventory per each and automatically
group hosts based on file name or specify a group in the ini.
You can also use <SCRIPT_NAME>_HOST|USER|PASSWORD environment variables


@@ -133,7 +133,7 @@ class TestSynchronize(unittest.TestCase):
def test_synchronize_action_vagrant(self):
""" Verify the action plugin accomodates the common
""" Verify the action plugin accommodates the common
scenarios for vagrant boxes. """
runner = FakeRunner()