parent 51b595992b
commit 630185cb20
11 changed files with 134 additions and 149 deletions
@@ -18,4 +18,3 @@
 # Make coding more python3-ish
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
-
@@ -40,4 +40,3 @@ if 'action_write_locks' not in globals():
     mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
     for mod_name in mods:
         action_write_locks[mod_name] = Lock()
-
@@ -690,7 +690,7 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas

                     zf.writestr('ansible_module_%s.py' % module_name, b_module_data)

-                    py_module_cache = { ('__init__',): (b'', '[builtin]') }
+                    py_module_cache = {('__init__',): (b'', '[builtin]')}
                     recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
                     zf.close()
                     zipdata = base64.b64encode(zipoutput.getvalue())
@@ -721,8 +721,8 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
             try:
                 zipdata = open(cached_module_filename, 'rb').read()
             except IOError:
-                raise AnsibleError('A different worker process failed to create module file.'
-                                   ' Look at traceback for that process for debugging information.')
+                raise AnsibleError('A different worker process failed to create module file. '
+                                   'Look at traceback for that process for debugging information.')
         zipdata = to_text(zipdata, errors='surrogate_or_strict')

         shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
@@ -734,7 +734,7 @@ def _find_module_utils(module_name, b_module_data, module_path, module_args, tas
         interpreter_parts = interpreter.split(u' ')
         interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

-        now=datetime.datetime.utcnow()
+        now = datetime.datetime.utcnow()
         output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
             zipdata=zipdata,
             ansible_module=module_name,
@@ -837,6 +837,7 @@ def modify_module(module_name, module_path, module_args, task_vars=dict(), modul

     return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))

+
 def build_windows_module_payload(module_name, module_path, b_module_data, module_args, task_vars, task, play_context, environment):
     exec_manifest = dict(
         module_entry=to_text(base64.b64encode(b_module_data)),
@@ -856,7 +857,7 @@ def build_windows_module_payload(module_name, module_path, b_module_data, module
         exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
         exec_manifest["async_timeout_sec"] = task.async

-    if play_context.become and play_context.become_method=='runas':
+    if play_context.become and play_context.become_method == 'runas':
         exec_manifest["actions"].insert(0, 'become')
         exec_manifest["become_user"] = play_context.become_user
         exec_manifest["become_password"] = play_context.become_pass
@@ -68,7 +68,7 @@ class HostState:
                 return "UNKNOWN STATE"

         def _failed_state_to_string(n):
-            states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"}
+            states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
             if n == 0:
                 return "FAILED_NONE"
             else:
@@ -130,6 +130,7 @@ class HostState:
             new_state.always_child_state = self.always_child_state.copy()
         return new_state

+
 class PlayIterator:

     # the primary running states for the play iteration
@@ -270,7 +271,6 @@ class PlayIterator:
         display.debug(" ^ state is: %s" % s)
         return (s, task)

-
     def _get_next_task_from_state(self, state, host, peek, in_child=False):

         task = None
@@ -304,7 +304,7 @@ class PlayIterator:

                     if (gathering == 'implicit' and implied) or \
                        (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
-                       (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name,{}).get('module_setup', False))):
+                       (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('module_setup', False))):
                         # The setup block is always self._blocks[0], as we inject it
                         # during the play compilation in __init__ above.
                         setup_block = self._blocks[0]
@@ -496,9 +496,9 @@ class PlayIterator:
         elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
             return True
         elif state.fail_state != self.FAILED_NONE:
-            if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
+            if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
                 return False
-            elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
+            elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
                 return False
             else:
                 return not state.did_rescue
@@ -567,4 +567,3 @@ class PlayIterator:
         for b in task_list:
             self.cache_block_tasks(b)
         self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
-
@@ -79,7 +79,7 @@ class PlaybookExecutor:
         try:
             for playbook_path in self._playbooks:
                 pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
-                #FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
+                # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

                 if self._tqm is None: # we are doing a listing
                     entry = {'playbook': playbook_path}
@@ -18,4 +18,3 @@
 # Make coding more python3-ish
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type
-
@@ -28,11 +28,11 @@ from jinja2.exceptions import TemplateNotFound

 # TODO: not needed if we use the cryptography library with its default RNG
 # engine
-HAS_ATFORK=True
+HAS_ATFORK = True
 try:
     from Crypto.Random import atfork
 except ImportError:
-    HAS_ATFORK=False
+    HAS_ATFORK = False

 from ansible.errors import AnsibleConnectionFailure
 from ansible.executor.task_executor import TaskExecutor
@@ -95,9 +95,9 @@ class WorkerProcess(multiprocessing.Process):
         signify that they are ready for their next task.
         '''

-        #import cProfile, pstats, StringIO
-        #pr = cProfile.Profile()
-        #pr.enable()
+        # import cProfile, pstats, StringIO
+        # pr = cProfile.Profile()
+        # pr.enable()

         if HAS_ATFORK:
             atfork()
@@ -160,11 +160,10 @@ class WorkerProcess(multiprocessing.Process):

         display.debug("WORKER PROCESS EXITING")

-        #pr.disable()
-        #s = StringIO.StringIO()
-        #sortby = 'time'
-        #ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
-        #ps.print_stats()
-        #with open('worker_%06d.stats' % os.getpid(), 'w') as f:
+        # pr.disable()
+        # s = StringIO.StringIO()
+        # sortby = 'time'
+        # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+        # ps.print_stats()
+        # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
         #    f.write(s.getvalue())
-
@@ -23,6 +23,7 @@ from collections import MutableMapping

 from ansible.utils.vars import merge_hash

+
 class AggregateStats:
     ''' holds stats about per-host activity during playbook runs '''

@@ -43,17 +44,17 @@ class AggregateStats:

         self.processed[host] = 1
         prev = (getattr(self, what)).get(host, 0)
-        getattr(self, what)[host] = prev+1
+        getattr(self, what)[host] = prev + 1

     def summarize(self, host):
         ''' return information about a particular host '''

         return dict(
-            ok = self.ok.get(host, 0),
-            failures = self.failures.get(host, 0),
-            unreachable = self.dark.get(host,0),
-            changed = self.changed.get(host, 0),
-            skipped = self.skipped.get(host, 0)
+            ok=self.ok.get(host, 0),
+            failures=self.failures.get(host, 0),
+            unreachable=self.dark.get(host, 0),
+            changed=self.changed.get(host, 0),
+            skipped=self.skipped.get(host, 0),
         )

     def set_custom_stats(self, which, what, host=None):
@@ -83,4 +84,3 @@ class AggregateStats:
         else:
             # let overloaded + take care of other types
             self.custom[host][which] += what
-
@@ -148,7 +148,7 @@ class TaskExecutor:
                         else:
                             raise
             elif isinstance(res, list):
-                for idx,item in enumerate(res):
+                for (idx, item) in enumerate(res):
                     res[idx] = _clean_res(item, errors=errors)
             return res

@@ -189,7 +189,6 @@ class TaskExecutor:
         # get search path for this task to pass to lookup plugins
         self._job_vars['ansible_search_path'] = self._task.get_search_path()

-
         templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
         items = None
         if self._task.loop:
@@ -211,7 +210,7 @@ class TaskExecutor:
                 for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
                     if subdir in self._task.action:
                         break
-                setattr(mylookup,'_subdir', subdir + 's')
+                setattr(mylookup, '_subdir', subdir + 's')

                 # run lookup
                 items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True)
@@ -249,7 +248,7 @@ class TaskExecutor:

         # make copies of the job vars and task so we can add the item to
         # the variables and re-validate the task with the item variable
-        #task_vars = self._job_vars.copy()
+        # task_vars = self._job_vars.copy()
         task_vars = self._job_vars

         loop_var = 'item'
@@ -369,7 +368,7 @@ class TaskExecutor:
                     else:
                         # Restore the name parameter
                         self._task.args['name'] = name
-                # elif:
+                # elif:
                     # Right now we only optimize single entries. In the future we
                     # could optimize more types:
                     # * lists can be squashed together
@@ -544,7 +543,7 @@ class TaskExecutor:
         if self._task.async > 0:
             if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                 result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
-                #FIXME callback 'v2_runner_on_async_poll' here
+                # FIXME callback 'v2_runner_on_async_poll' here

         # ensure no log is preserved
         result["_ansible_no_log"] = self._play_context.no_log
@@ -651,7 +650,7 @@ class TaskExecutor:

         async_task = Task().load(dict(action='async_status jid=%s' % async_jid))

-        #FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
+        # FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
         # Because this is an async task, the action handler is async. However,
         # we need the 'normal' action handler for the status check, so get it
         # now via the action_loader
@@ -145,7 +145,7 @@ class TaskQueueManager:
             if handler.listen:
                 listeners = handler.listen
                 if not isinstance(listeners, list):
-                    listeners = [ listeners ]
+                    listeners = [listeners]
                 for listener in listeners:
                     if listener not in self._listening_handlers:
                         self._listening_handlers[listener] = []
@@ -262,7 +262,7 @@ class TaskQueueManager:
             play_context=play_context,
             variable_manager=self._variable_manager,
             all_vars=all_vars,
-            start_at_done = self._start_at_done,
+            start_at_done=self._start_at_done,
         )

         # Because the TQM may survive multiple play runs, we start by marking
@@ -332,7 +332,7 @@ class TaskQueueManager:
         # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>

         defunct = False
-        for idx,x in enumerate(self._workers):
+        for (idx, x) in enumerate(self._workers):
             if hasattr(x[0], 'exitcode'):
                 if x[0].exitcode in [-9, -11, -15]:
                     defunct = True
@@ -350,7 +350,7 @@ class TaskQueueManager:
             for possible in [method_name, 'v2_on_any']:
                 gotit = getattr(callback_plugin, possible, None)
                 if gotit is None:
-                    gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
+                    gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
                 if gotit is not None:
                     methods.append(gotit)

@@ -10,20 +10,10 @@ lib/ansible/cli/playbook.py
 lib/ansible/cli/pull.py
 lib/ansible/cli/vault.py
 lib/ansible/constants.py
-lib/ansible/executor/__init__.py
-lib/ansible/executor/action_write_locks.py
-lib/ansible/executor/module_common.py
-lib/ansible/executor/play_iterator.py
-lib/ansible/executor/playbook_executor.py
-lib/ansible/executor/process/__init__.py
-lib/ansible/executor/process/worker.py
-lib/ansible/executor/stats.py
-lib/ansible/executor/task_executor.py
-lib/ansible/executor/task_queue_manager.py
-lib/ansible/inventory/manager.py
 lib/ansible/inventory/data.py
 lib/ansible/inventory/group.py
 lib/ansible/inventory/host.py
+lib/ansible/inventory/manager.py
 lib/ansible/module_utils/_text.py
 lib/ansible/module_utils/a10.py
 lib/ansible/module_utils/ansible_tower.py