Splitting SETUP_CACHE into two caches, one for host vars and one for setup facts

James Cammarata 2014-04-01 09:48:14 -05:00
parent a4df906fc9
commit e2d86e4f43
5 changed files with 40 additions and 22 deletions

docsite/rst/playbooks_variables.rst

@@ -845,8 +845,11 @@ If multiple variables of the same name are defined in different places, they win
* -e variables always win
* then comes "most everything else"
* then comes variables defined in inventory
* then comes facts discovered about a system
* then "role defaults", which are the most "defaulty" and lose in priority to everything.
.. note:: In versions prior to 1.5.4, facts discovered about a system were in the "most everything else" category above.
That seems a little theoretical. Let's show some examples and where you would choose to put what based on the kind of
control you might want over values.
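
As a rough illustration of that ordering, the sketch below layers plain dicts so that later updates win; resolve_variables is a hypothetical helper for this page, not code from Ansible or from this commit:

    # Illustrative only: apply the layers from weakest to strongest, so the
    # last update() wins, matching the documented precedence.
    def resolve_variables(role_defaults, facts, inventory_vars, play_vars, extra_vars):
        resolved = {}
        resolved.update(role_defaults)   # "role defaults" lose to everything
        resolved.update(facts)           # facts discovered about the system (1.5.4+)
        resolved.update(inventory_vars)  # variables defined in inventory
        resolved.update(play_vars)       # "most everything else"
        resolved.update(extra_vars)      # -e variables always win
        return resolved

    # An inventory var overrides a fact, and -e overrides both:
    print(resolve_variables({'http_port': 80}, {'http_port': 8080},
                            {'http_port': 8000}, {}, {'http_port': 443}))
    # -> {'http_port': 443}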

lib/ansible/playbook/__init__.py

@@ -29,7 +29,11 @@ from play import Play
import StringIO
import pipes
# the setup cache stores all variables about a host
# gathered during the setup step, while the vars cache
# holds all other variables about a host
SETUP_CACHE = collections.defaultdict(dict)
VARS_CACHE = collections.defaultdict(dict)
class PlayBook(object):
'''
@@ -98,6 +102,7 @@ class PlayBook(object):
"""
self.SETUP_CACHE = SETUP_CACHE
self.VARS_CACHE = VARS_CACHE
arguments = []
if playbook is None:
@@ -304,7 +309,7 @@ class PlayBook(object):
# since these likely got killed by async_wrapper
for host in poller.hosts_to_poll:
reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
self.runner_callbacks.on_async_failed(host, reason, poller.runner.setup_cache[host]['ansible_job_id'])
self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
results['contacted'][host] = reason
return results
@@ -339,6 +344,7 @@ class PlayBook(object):
default_vars=task.default_vars,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
basedir=task.play.basedir,
conditional=task.when,
callbacks=self.runner_callbacks,
@@ -375,7 +381,7 @@ class PlayBook(object):
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
else:
for (host, res) in results.get('contacted', {}).iteritems():
self.runner_callbacks.on_async_ok(host, res, poller.runner.setup_cache[host]['ansible_job_id'])
self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
contacted = results.get('contacted',{})
dark = results.get('dark', {})
@@ -434,8 +440,6 @@ class PlayBook(object):
else:
facts = result.get('ansible_facts', {})
self.SETUP_CACHE[host].update(facts)
# extra vars need to always trump - so update again following the facts
self.SETUP_CACHE[host].update(self.extra_vars)
if task.register:
if 'stdout' in result and 'stdout_lines' not in result:
result['stdout_lines'] = result['stdout'].splitlines()
@@ -512,6 +516,7 @@ class PlayBook(object):
remote_port=play.remote_port,
private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
callbacks=self.runner_callbacks,
sudo=play.sudo,
sudo_user=play.sudo_user,
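
The hunks above introduce the second module-level cache and thread it through to the Runner: SETUP_CACHE keeps what the setup step gathers, while VARS_CACHE holds the other per-host variables (the async job id, host-specific vars_files data, and so on). Both are per-host defaultdicts; a minimal standalone sketch of that behaviour, illustrative only and not part of the commit:

    import collections

    # Per-host dictionaries are created lazily on first access, so callers
    # can write CACHE[host].update(...) without checking whether the host
    # has been seen before.
    SETUP_CACHE = collections.defaultdict(dict)  # facts from the setup step
    VARS_CACHE = collections.defaultdict(dict)   # other per-host variables

    SETUP_CACHE['web01'].update({'ansible_os_family': 'Debian'})
    VARS_CACHE['web01'].update({'ansible_job_id': '123456789'})

    print(SETUP_CACHE['web01'])  # {'ansible_os_family': 'Debian'}
    print(VARS_CACHE['db01'])    # {} -- created on demand, no KeyError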

lib/ansible/playbook/play.py

@@ -766,7 +766,7 @@ class Play(object):
if host is not None:
inject = {}
inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
inject.update(self.playbook.SETUP_CACHE[host])
inject.update(self.playbook.VARS_CACHE[host])
for filename in self.vars_files:
@@ -790,9 +790,9 @@ class Play(object):
if host is not None:
if self._has_vars_in(filename2) and not self._has_vars_in(filename3):
# this filename has variables in it that were fact specific
# so it needs to be loaded into the per host SETUP_CACHE
# so it needs to be loaded into the per host VARS_CACHE
data = utils.combine_vars(inject, data)
self.playbook.SETUP_CACHE[host].update(data)
self.playbook.VARS_CACHE[host].update(data)
self.playbook.callbacks.on_import_for_host(host, filename4)
elif not self._has_vars_in(filename4):
# found a non-host specific variable, load into vars and NOT
@@ -825,9 +825,13 @@ class Play(object):
# running a host specific pass and has host specific variables
# load into setup cache
new_vars = utils.combine_vars(inject, new_vars)
self.playbook.SETUP_CACHE[host] = utils.combine_vars(
self.playbook.SETUP_CACHE[host], new_vars)
self.playbook.VARS_CACHE[host] = utils.combine_vars(
self.playbook.VARS_CACHE[host], new_vars)
self.playbook.callbacks.on_import_for_host(host, filename4)
elif host is None:
# running a non-host specific pass and we can update the global vars instead
self.vars = utils.combine_vars(self.vars, new_vars)
# finally, update the VARS_CACHE for the host, if it is set
if host is not None:
self.playbook.VARS_CACHE[host].update(self.playbook.extra_vars)
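
The net effect of the play.py changes above: host-specific vars_files data is merged into the per-host VARS_CACHE instead of SETUP_CACHE, and extra vars are re-applied to VARS_CACHE last so that -e values still win. A rough standalone sketch of that layering, using a simplified stand-in for utils.combine_vars (which in this codebase either replaces keys or deep-merges, depending on the hash_behaviour setting):

    def combine_vars(a, b):
        # simplified stand-in: keys from b win over keys from a
        # (the real utils.combine_vars can also deep-merge hashes)
        result = a.copy()
        result.update(b)
        return result

    vars_cache = {'web01': {'app_port': 8080}}
    from_vars_file = {'app_port': 9090, 'app_name': 'demo'}  # host-specific vars_files data
    extra_vars = {'app_port': 443}                           # passed with -e

    vars_cache['web01'] = combine_vars(vars_cache['web01'], from_vars_file)
    vars_cache['web01'].update(extra_vars)  # extra vars applied last, so they trump
    print(vars_cache['web01'])  # {'app_port': 443, 'app_name': 'demo'}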

lib/ansible/runner/__init__.py

@@ -80,18 +80,18 @@ def _executor_hook(job_queue, result_queue, new_stdin):
traceback.print_exc()
class HostVars(dict):
''' A special view of setup_cache that adds values from the inventory when needed. '''
''' A special view of vars_cache that adds values from the inventory when needed. '''
def __init__(self, setup_cache, inventory):
self.setup_cache = setup_cache
def __init__(self, vars_cache, inventory):
self.vars_cache = vars_cache
self.inventory = inventory
self.lookup = dict()
self.update(setup_cache)
self.update(vars_cache)
def __getitem__(self, host):
if host not in self.lookup:
result = self.inventory.get_variables(host)
result.update(self.setup_cache.get(host, {}))
result.update(self.vars_cache.get(host, {}))
self.lookup[host] = result
return self.lookup[host]
@@ -117,6 +117,7 @@ class Runner(object):
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
vars_cache=None, # used to store variables about hosts
transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
@@ -154,6 +155,7 @@ class Runner(object):
self.check = check
self.diff = diff
self.setup_cache = utils.default(setup_cache, lambda: collections.defaultdict(dict))
self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
self.basedir = utils.default(basedir, lambda: os.getcwd())
self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
self.generated_jid = str(random.randint(0, 999999999999))
@@ -550,13 +552,17 @@ class Runner(object):
module_vars = template.template(self.basedir, self.module_vars, host_variables)
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
combined_cache.get(host, {}).update(self.vars_cache.get(host, {}))
inject = {}
inject = utils.combine_vars(inject, self.default_vars)
inject = utils.combine_vars(inject, host_variables)
inject = utils.combine_vars(inject, module_vars)
inject = utils.combine_vars(inject, self.setup_cache[host])
inject = utils.combine_vars(inject, combined_cache.get(host, {}))
inject.setdefault('ansible_ssh_user', self.remote_user)
inject['hostvars'] = HostVars(self.setup_cache, self.inventory)
inject['hostvars'] = HostVars(combined_cache, self.inventory)
inject['group_names'] = host_variables.get('group_names', [])
inject['groups'] = self.inventory.groups_list()
inject['vars'] = self.module_vars
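
In the runner, the per-task inject is now built from a merged view of both caches, and that merged view is what HostVars wraps. One subtlety worth noting: dict.copy() is shallow, so the in-place update of the copied per-host dict above also touches the inner dict shared with setup_cache. A copy-safe version of the merge is sketched below with a hypothetical helper, not code from this commit:

    import collections

    def merged_host_cache(setup_cache, vars_cache, host):
        # Hypothetical helper: build the per-host view without mutating
        # either cache. Copying the inner dict (not just the outer one)
        # keeps setup_cache[host] untouched.
        combined = dict(setup_cache.get(host, {}))
        combined.update(vars_cache.get(host, {}))
        return combined

    setup_cache = collections.defaultdict(dict)
    vars_cache = collections.defaultdict(dict)
    setup_cache['web01'] = {'ansible_os_family': 'Debian'}
    vars_cache['web01'] = {'ansible_job_id': '42'}

    print(merged_host_cache(setup_cache, vars_cache, 'web01'))
    # {'ansible_os_family': 'Debian', 'ansible_job_id': '42'}
    print(setup_cache['web01'])  # unchanged: {'ansible_os_family': 'Debian'}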

lib/ansible/runner/poller.py

@@ -38,13 +38,13 @@ class AsyncPoller(object):
if res.get('started', False):
self.hosts_to_poll.append(host)
jid = res.get('ansible_job_id', None)
self.runner.setup_cache[host]['ansible_job_id'] = jid
self.runner.vars_cache[host]['ansible_job_id'] = jid
self.active = True
else:
skipped = skipped & res.get('skipped', False)
self.results['contacted'][host] = res
for (host, res) in results['dark'].iteritems():
self.runner.setup_cache[host]['ansible_job_id'] = ''
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['dark'][host] = res
if not skipped:
@@ -77,14 +77,14 @@ class AsyncPoller(object):
self.results['contacted'][host] = res
poll_results['contacted'][host] = res
if res.get('failed', False) or res.get('rc', 0) != 0:
self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host]['ansible_job_id'])
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
else:
self.runner.callbacks.on_async_ok(host, res, self.runner.setup_cache[host]['ansible_job_id'])
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
for (host, res) in results['dark'].iteritems():
self.results['dark'][host] = res
poll_results['dark'][host] = res
if host in self.hosts_to_poll:
self.runner.callbacks.on_async_failed(host, res, self.runner.setup_cache[host].get('ansible_job_id','XX'))
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
self.hosts_to_poll = hosts
if len(hosts)==0:
@@ -106,7 +106,7 @@ class AsyncPoller(object):
for (host, res) in poll_results['polled'].iteritems():
if res.get('started'):
self.runner.callbacks.on_async_poll(host, res, self.runner.setup_cache[host]['ansible_job_id'], clock)
self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
clock = clock - poll_interval
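
The poller changes are mechanical: the async job id that used to be stashed in setup_cache is now stored per host in vars_cache when a background task starts, and read back when reporting poll, ok, and failed events. A small illustrative sketch of that round trip (not the poller itself):

    import collections

    vars_cache = collections.defaultdict(dict)

    def record_async_start(host, result):
        # store the job id per host when a background task reports 'started'
        vars_cache[host]['ansible_job_id'] = result.get('ansible_job_id', '')

    def report(host, result):
        # read the job id back when reporting the outcome for that host
        jid = vars_cache[host].get('ansible_job_id', 'XX')
        status = 'failed' if result.get('failed') or result.get('rc', 0) != 0 else 'ok'
        print("async %s on %s (jid=%s)" % (status, host, jid))

    record_async_start('web01', {'started': True, 'ansible_job_id': '123.456'})
    report('web01', {'rc': 0})  # async ok on web01 (jid=123.456)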