Playbook refactoring -- work in progress.

Michael DeHaan 2012-05-26 00:37:34 -04:00
parent cf9ddf3a30
commit b9b53d1941
5 changed files with 306 additions and 341 deletions

View file

@@ -4,7 +4,7 @@
# because of the comments it's less concise than it normally is. But feel
# free to comment your playbooks if you like.
- hosts: dbservers
- hosts: all
# we can define variables the normal way...

View file

@@ -23,9 +23,7 @@ import ansible.constants as C
from ansible import utils
from ansible import errors
import os
# used to transfer variables to Runner
SETUP_CACHE={ }
from play import Play
#############################################
@@ -82,6 +80,8 @@ class PlayBook(object):
sudo: if not specified per play, requests all plays use sudo mode
"""
self.SETUP_CACHE = {}
if playbook is None or callbacks is None or runner_callbacks is None or stats is None:
raise Exception('missing required arguments')
@@ -108,132 +108,11 @@ class PlayBook(object):
self.inventory = ansible.inventory.Inventory(host_list)
if not self.inventory._is_script:
self.global_vars.update(self.inventory.get_group_variables('all'))
self.basedir = os.path.dirname(playbook)
self.playbook = self._parse_playbook(playbook)
# *****************************************************
def _get_vars(self, play, dirname):
''' load the vars section from a play '''
if play.get('vars') is None:
play['vars'] = {}
if type(play['vars']) not in [dict, list]:
raise errors.AnsibleError("'vars' section must contain only key/value pairs")
vars = self.global_vars
# translate a list of vars into a dict
if type(play['vars']) == list:
for item in play['vars']:
k, v = item.items()[0]
vars[k] = v
else:
vars.update(play['vars'])
vars_prompt = play.get('vars_prompt', {})
if type(vars_prompt) != dict:
raise errors.AnsibleError("'vars_prompt' section must contain only key/value pairs")
for vname in vars_prompt:
# TODO: make this prompt one line and consider double entry
print vars_prompt[vname]
vars[vname] = self.callbacks.on_vars_prompt(vname)
results = self.extra_vars.copy()
results.update(vars)
return results
# *****************************************************
def _include_tasks(self, play, task, dirname, new_tasks):
''' load tasks included from external files. '''
# include: some.yml a=2 b=3 c=4
play_vars = self._get_vars(play, dirname)
include_tokens = utils.template(task['include'], play_vars, SETUP_CACHE).split()
path = utils.path_dwim(dirname, include_tokens[0])
include_vars = {}
for i,x in enumerate(include_tokens):
if x.find("=") != -1:
(k,v) = x.split("=")
include_vars[k] = v
inject_vars = play_vars.copy()
inject_vars.update(include_vars)
included = utils.parse_yaml_from_file(path)
for x in included:
if len(include_vars):
x["vars"] = include_vars
new_tasks.append(x)
# *****************************************************
def _include_handlers(self, play, handler, dirname, new_handlers):
''' load handlers from external files '''
inject_vars = self._get_vars(play, dirname)
path = utils.template(handler['include'], inject_vars, SETUP_CACHE)
path = utils.path_dwim(dirname, path)
included = utils.template_from_file(path, inject_vars, SETUP_CACHE)
included = utils.parse_yaml(included)
for x in included:
new_handlers.append(x)
# *****************************************************
def _parse_playbook(self, playbook):
''' load YAML file, including handling for imported files '''
dirname = os.path.dirname(playbook)
playbook = utils.parse_yaml_from_file(playbook)
for play in playbook:
tasks = play.get('tasks',[])
handlers = play.get('handlers', [])
# process tasks in this file as well as imported tasks
new_tasks = []
for task in tasks:
if 'include' in task:
self._include_tasks(play, task, dirname, new_tasks)
else:
new_tasks.append(task)
# now new_tasks contains a list of tasks, but tasks may contain
# lists of with_items to loop over. Do that.
# TODO: refactor into subfunction
new_tasks2 = []
for task in new_tasks:
if 'with_items' in task:
for item in task['with_items']:
produced_task = {}
name = task.get('name', task.get('action', 'unnamed task'))
action = task.get('action', None)
only_if = task.get('only_if', None)
if action is None:
raise errors.AnsibleError('action is required')
produced_task = task.copy()
produced_task['action'] = utils.template(action, dict(item=item), SETUP_CACHE)
produced_task['name'] = utils.template(name, dict(item=item), SETUP_CACHE)
if only_if:
produced_task['only_if'] = utils.template(only_if, dict(item=item), SETUP_CACHE)
new_tasks2.append(produced_task)
else:
new_tasks2.append(task)
play['tasks'] = new_tasks2
# process handlers as well as imported handlers
new_handlers = []
for handler in handlers:
if 'include' in handler:
self._include_handlers(play, handler, dirname, new_handlers)
else:
new_handlers.append(handler)
play['handlers'] = new_handlers
return playbook
self.basedir = os.path.dirname(playbook)
self.playbook = utils.parse_yaml_from_file(playbook)
# *****************************************************
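
For readers skimming the removed _parse_playbook block above: a minimal standalone sketch (not part of the commit) of what its with_items expansion produced -- one concrete task per item, with utils.template reduced here to a plain string replace for illustration.

# illustrative sketch only; the hypothetical task below is not from the diff
task = {'name': 'add user $item', 'action': 'user name=$item state=present'}

expanded = []
for item in ['alice', 'bob']:
    produced = task.copy()
    # the real code templated $item via utils.template(action, dict(item=item), SETUP_CACHE)
    produced['action'] = task['action'].replace('$item', item)
    produced['name'] = task['name'].replace('$item', item)
    expanded.append(produced)

for t in expanded:
    print(t['action'])
# -> user name=alice state=present
# -> user name=bob state=present
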
@@ -242,8 +121,8 @@ class PlayBook(object):
# loop through all patterns and run them
self.callbacks.on_start()
for pattern in self.playbook:
self._run_play(pattern)
for play_ds in self.playbook:
self._run_play(Play(self,play_ds))
# summarize the results
results = {}
@@ -269,190 +148,88 @@ class PlayBook(object):
# *****************************************************
def _run_module(self, pattern, module, args, vars, remote_user,
async_seconds, async_poll_interval, only_if, sudo, sudo_user, transport, port):
def _run_task_internal(self, task):
''' run a particular module step in a playbook '''
hosts = [ h for h in self.inventory.list_hosts() if (h not in self.stats.failures) and (h not in self.stats.dark)]
self.inventory.restrict_to(hosts)
if port is None:
port=self.remote_port
runner = ansible.runner.Runner(
pattern=pattern, inventory=self.inventory, module_name=module,
module_args=args, forks=self.forks,
pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
module_args=task.module_args, forks=self.forks,
remote_pass=self.remote_pass, module_path=self.module_path,
timeout=self.timeout, remote_user=remote_user,
remote_port=port, module_vars=vars,
timeout=self.timeout, remote_user=task.play.remote_user,
remote_port=task.play.remote_port, module_vars=task.module_vars,
private_key_file=self.private_key_file,
setup_cache=SETUP_CACHE, basedir=self.basedir,
conditional=only_if, callbacks=self.runner_callbacks,
debug=self.debug, sudo=sudo, sudo_user=sudo_user,
transport=transport, sudo_pass=self.sudo_pass, is_playbook=True
setup_cache=self.SETUP_CACHE, basedir=self.basedir,
conditional=task.only_if, callbacks=self.runner_callbacks,
debug=self.debug, sudo=task.play.sudo, sudo_user=task.play.sudo_user,
transport=task.play.transport, sudo_pass=self.sudo_pass, is_playbook=True
)
if async_seconds == 0:
if task.async_seconds == 0:
results = runner.run()
else:
results, poller = runner.run_async(async_seconds)
results, poller = runner.run_async(task.async_seconds)
self.stats.compute(results)
if async_poll_interval > 0:
# if not polling, playbook requested fire and forget
# trust the user wanted that and return immediately
results = self._async_poll(poller, async_seconds, async_poll_interval)
if task.async_poll_interval > 0:
# if not polling, playbook requested fire and forget, so don't poll
results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
self.inventory.lift_restriction()
return results
# *****************************************************
def _run_task(self, pattern=None, task=None,
remote_user=None, handlers=None, conditional=False,
sudo=False, sudo_user=None, transport=None, port=None):
def _run_task(self, play, task, is_handler):
''' run a single task in the playbook and recursively run any subtasks. '''
# load the module name and parameters from the task entry
name = task.get('name', None)
action = task.get('action', None)
if action is None:
raise errors.AnsibleError("action is required for each item in tasks: offending task is %s" % name if name else "unknown")
if name is None:
name = action
only_if = task.get('only_if', 'True')
async_seconds = int(task.get('async', 0)) # not async by default
async_poll_interval = int(task.get('poll', 10)) # default poll = 10 seconds
self.callbacks.on_task_start(task.name, is_handler)
tokens = action.split(None, 1)
module_name = tokens[0]
module_args = ''
if len(tokens) > 1:
module_args = tokens[1]
# include task specific vars
module_vars = task.get('vars', {})
if 'first_available_file' in task:
module_vars['first_available_file'] = task.get('first_available_file')
# tasks can be direct (run on all nodes matching
# the pattern) or conditional, where they ran
# as the result of a change handler on a subset
# of all of the hosts
self.callbacks.on_task_start(name, conditional)
# load up an appropriate ansible runner to
# run the task in parallel
results = self._run_module(pattern, module_name,
module_args, module_vars, remote_user, async_seconds,
async_poll_interval, only_if, sudo, sudo_user, transport, port)
# load up an appropriate ansible runner to run the task in parallel
results = self._run_task_internal(task)
# add facts to the global setup cache
for host, result in results['contacted'].iteritems():
if "ansible_facts" in result:
for k,v in result['ansible_facts'].iteritems():
SETUP_CACHE[host][k]=v
self.SETUP_CACHE[host][k]=v
self.stats.compute(results)
# if no hosts are matched, carry on, unlike /bin/ansible
# which would warn you about this
# if no hosts are matched, carry on
if results is None:
results = {}
# flag which notify handlers need to be run
# this will be on a SUBSET of the actual host list. For instance
# a file might need to be written on only half of the nodes so
# we would only trigger restarting Apache on half of the nodes
subtasks = task.get('notify', [])
if isinstance(subtasks, basestring):
subtasks = [subtasks]
if len(subtasks) > 0:
if len(task.notify) > 0:
for host, results in results.get('contacted',{}).iteritems():
if results.get('changed', False):
for subtask in subtasks:
self._flag_handler(handlers, subtask, host)
for handler_name in task.notify:
self._flag_handler(play.handlers(), handler_name, host)
# *****************************************************
def _flag_handler(self, handlers, match_name, host):
def _flag_handler(self, handlers, handler_name, host):
'''
if a task has any notify elements, flag handlers for run
at end of execution cycle for hosts that have indicated
changes have been made
'''
# for all registered handlers in the ansible playbook
# for this particular pattern group
found = False
for x in handlers:
name = x.get('name', None)
if name is None:
raise errors.AnsibleError('handler is missing a name')
if match_name == name:
if handler_name == x.name:
found = True
self.callbacks.on_notify(host, name)
# flag the handler with the list of hosts it needs to be run on, it will be run later
if not 'run' in x:
x['run'] = []
x['run'].append(host)
self.callbacks.on_notify(host, x.name)
x.notified_by.append(host)
if not found:
raise errors.AnsibleError("change handler (%s) is not defined" % match_name)
raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
# *****************************************************
def _do_conditional_imports(self, vars_files, pattern=None):
''' handle the vars_files section, which can contain variables '''
def _do_setup_step(self, play, vars_files=None):
# FIXME: save parsed variable results in memory to avoid excessive re-reading/parsing
# FIXME: currently parses imports for hosts not in the pattern, that is not wrong, but it's
# not super optimized yet either, because we wouldn't have hit them, ergo
# it will raise false errors if there is no defaults variable file without any $vars
# in it, which could happen on uncontacted hosts.
if type(vars_files) != list:
raise errors.AnsibleError("vars_files must be a list")
host_list = [ h for h in self.inventory.list_hosts(pattern)
if not (h in self.stats.failures or h in self.stats.dark) ]
for host in host_list:
cache_vars = SETUP_CACHE.get(host,{})
SETUP_CACHE[host] = cache_vars
for filename in vars_files:
if type(filename) == list:
# loop over all filenames, loading the first one, and failing if none found
found = False
sequence = []
for real_filename in filename:
filename2 = utils.path_dwim(self.basedir, utils.template(real_filename, cache_vars, SETUP_CACHE))
sequence.append(filename2)
if os.path.exists(filename2):
found = True
data = utils.parse_yaml_from_file(filename2)
SETUP_CACHE[host].update(data)
self.callbacks.on_import_for_host(host, filename2)
break
else:
self.callbacks.on_not_import_for_host(host, filename2)
if not found:
raise errors.AnsibleError(
"%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
)
else:
filename2 = utils.path_dwim(self.basedir, utils.template(filename, cache_vars, SETUP_CACHE))
if not os.path.exists(filename2):
raise errors.AnsibleError("no file matched for vars_file import: %s" % filename2)
data = utils.parse_yaml_from_file(filename2)
SETUP_CACHE[host].update(data)
self.callbacks.on_import_for_host(host, filename2)
# *****************************************************
def _do_setup_step(self, pattern, vars, user, port, sudo, sudo_user, transport, vars_files=None):
''' push variables down to the systems and get variables+facts back up '''
# this enables conditional includes like $facter_os.yml and is only done
@@ -461,27 +238,22 @@ class PlayBook(object):
if vars_files is not None:
self.callbacks.on_setup_secondary()
self._do_conditional_imports(vars_files, pattern)
play.update_vars_files(self.inventory.list_hosts(play.hosts))
else:
self.callbacks.on_setup_primary()
host_list = [ h for h in self.inventory.list_hosts(pattern)
if not (h in self.stats.failures or h in self.stats.dark) ]
host_list = [ h for h in self.inventory.list_hosts(play.hosts)
if not (h in self.stats.failures or h in self.stats.dark) ]
self.inventory.restrict_to(host_list)
# push any variables down to the system
setup_results = ansible.runner.Runner(
pattern=pattern, module_name='setup',
module_args=vars, inventory=self.inventory,
forks=self.forks, module_path=self.module_path,
timeout=self.timeout, remote_user=user,
remote_pass=self.remote_pass, remote_port=port,
private_key_file=self.private_key_file,
setup_cache=SETUP_CACHE,
callbacks=self.runner_callbacks, sudo=sudo, sudo_user=sudo_user,
debug=self.debug, transport=transport,
sudo_pass=self.sudo_pass, is_playbook=True
pattern=play.hosts, module_name='setup', module_args=play.vars, inventory=self.inventory,
forks=self.forks, module_path=self.module_path, timeout=self.timeout, remote_user=play.remote_user,
remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
debug=self.debug, transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True
).run()
self.stats.compute(setup_results, setup=True)
@@ -494,83 +266,31 @@ class PlayBook(object):
# first pass only or we'll erase good work
for (host, result) in setup_ok.iteritems():
if 'ansible_facts' in result:
SETUP_CACHE[host] = result['ansible_facts']
self.SETUP_CACHE[host] = result['ansible_facts']
return setup_results
# *****************************************************
def _run_play(self, pg):
def _run_play(self, play):
''' run a list of tasks for a given pattern, in order '''
# get configuration information about the pattern
pattern = pg.get('hosts')
if pattern is None:
raise errors.AnsibleError('hosts declaration is required')
if isinstance(pattern, list):
pattern = ';'.join(pattern)
pattern = utils.template(pattern, self.extra_vars, {})
name = pg.get('name', pattern)
vars = self._get_vars(pg, self.basedir)
vars_files = pg.get('vars_files', {})
tasks = pg.get('tasks', [])
handlers = pg.get('handlers', [])
user = pg.get('user', self.remote_user)
port = pg.get('port', self.remote_port)
sudo = pg.get('sudo', self.sudo)
sudo_user = pg.get('sudo_user', self.sudo_user)
transport = pg.get('connection', self.transport)
# the default sudo user is root, so if you change it, sudo is implied
if sudo_user != 'root':
sudo = True
self.callbacks.on_play_start(name)
self.callbacks.on_play_start(play.name)
# push any variables down to the system and get facts/ohai/other data back up
self._do_setup_step(pattern, vars, user, port, sudo, sudo_user, transport, None)
rc = self._do_setup_step(play) # pattern, vars, user, port, sudo, sudo_user, transport, None)
# now with that data, handle conditional variable file imports!
if len(vars_files) > 0:
self._do_setup_step(pattern, vars, user, port, sudo, sudo_user, transport, vars_files)
if len(play.vars_files) > 0:
rc = self._do_setup_step(play, play.vars_files)
# run all the top level tasks, these get run on every node
for task in tasks:
self._run_task(
pattern=pattern,
task=task,
handlers=handlers,
remote_user=user,
sudo=sudo,
sudo_user=sudo_user,
transport=transport,
port=port
)
for task in play.tasks():
self._run_task(play, task, False)
# handlers only run on certain nodes, they are flagged by _flag_handlers
# above. They only run on nodes when things mark them as changed, and
# handlers only get run once. For instance, the system is designed
# such that multiple config files if changed can ask for an Apache restart
# but Apache will only be restarted once (at the end).
for task in handlers:
triggered_by = task.get('run', None)
if type(triggered_by) == list:
self.inventory.restrict_to(triggered_by)
self._run_task(
pattern=pattern,
task=task,
handlers=[],
conditional=True,
remote_user=user,
sudo=sudo,
sudo_user=sudo_user,
transport=transport,
port=port
)
# run notify actions
for handler in play.handlers():
if len(handler.notified_by) > 0:
self.inventory.restrict_to(handler.notified_by)
self._run_task(play, handler, True)
self.inventory.lift_restriction()
# end of execution for this particular pattern. Multiple patterns
# can be in a single playbook file
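
To make the notify flow in the refactor above easier to follow, here is a minimal standalone sketch (not part of the diff); FakeHandler and the result dict are invented stand-ins, but the notified_by bookkeeping mirrors _flag_handler and the handler loop in _run_play.

class FakeHandler(object):
    # stand-in for a handler task; only the fields used by this sketch
    def __init__(self, name):
        self.name = name
        self.notified_by = []   # hosts appended by _flag_handler

restart = FakeHandler('restart apache')
results = {'web1': {'changed': True}, 'web2': {'changed': False}}

# _run_task: every contacted host that reported a change flags the notified handler
for host, result in results.items():
    if result.get('changed', False):
        restart.notified_by.append(host)

# _run_play: each notified handler runs once, restricted to the hosts that flagged it
if len(restart.notified_by) > 0:
    print("restrict inventory to %s, then run '%s'" % (restart.notified_by, restart.name))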

View file

@@ -0,0 +1,176 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from ansible import utils
from ansible import errors
from ansible.playbook.task import Task
import shlex
import os
class Play(object):
__slots__ = [
'hosts', 'name', 'vars', 'vars_prompt', 'vars_files', 'handlers', 'remote_user', 'remote_port',
'sudo', 'sudo_user', 'transport', 'playbook', '_ds', '_handlers', '_tasks'
]
# *************************************************
def __init__(self, playbook, ds):
self._ds = ds
self.playbook = playbook
self.hosts = ds.get('hosts', None)
self.hosts = utils.template(self.hosts, self.playbook.extra_vars, {})
self.name = ds.get('name', self.hosts)
self.vars = ds.get('vars', {})
self.vars_files = ds.get('vars_files', [])
self.vars_prompt = ds.get('vars_prompt', {})
self.vars = self._get_vars(self.playbook.basedir)
self._tasks = ds.get('tasks', [])
self._handlers = ds.get('handlers', [])
self.remote_user = ds.get('user', self.playbook.remote_user)
self.remote_port = ds.get('port', self.playbook.remote_port)
self.sudo = ds.get('sudo', self.playbook.sudo)
self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user)
self.transport = ds.get('connection', self.playbook.transport)
self._tasks = self._load_tasks(self._ds, 'tasks')
self._handlers = self._load_tasks(self._ds, 'handlers')
if self.hosts is None:
raise errors.AnsibleError('hosts declaration is required')
if isinstance(self.hosts, list):
self.hosts = ';'.join(self.hosts)
if self.sudo_user != 'root':
self.sudo = True
# *************************************************
def _load_tasks(self, ds, keyname):
''' handle task and handler include statements '''
items = ds.get(keyname, [])
results = []
for x in items:
if 'include' in x:
task_vars = self.vars.copy()
tokens = shlex.split(x['include'])
for t in tokens[1:]:
(k,v) = t.split("=", 1)
task_vars[k]=v
include_file = tokens[0]
data = utils.parse_yaml_from_file(utils.path_dwim(self.playbook.basedir, include_file))
for y in data:
t = Task(self,y)
# TODO: rename this to just 'vars'
t.module_vars = self.vars.copy()
t.module_vars.update(task_vars)
results.append(t)
elif type(x) == dict:
results.append(Task(self, x))
else:
raise Exception("unexpected task type")
return results
# *************************************************
def handlers(self):
return self._handlers
def tasks(self):
return self._tasks
# *************************************************
def _get_vars(self, dirname):
''' load the vars section from a play '''
if self.vars is None:
self.vars = {}
if type(self.vars) not in [dict, list]:
raise errors.AnsibleError("'vars' section must contain only key/value pairs")
vars = self.playbook.global_vars
# translate a list of vars into a dict
if type(self.vars) == list:
for item in self.vars:
k, v = item.items()[0]
vars[k] = v
else:
vars.update(self.vars)
if type(self.vars_prompt) != dict:
raise errors.AnsibleError("'vars_prompt' section must contain only key/value pairs")
for vname in self.vars_prompt:
# TODO: make this prompt one line and consider double entry
vars[vname] = self.callbacks.on_vars_prompt(vname)
results = self.playbook.extra_vars.copy()
results.update(vars)
return results
# *************************************************
def update_vars_files(self, hosts):
''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
for h in hosts:
self.update_vars_files_for_host(h)
# *************************************************
def update_vars_files_for_host(self, host):
if not host in self.playbook.SETUP_CACHE:
# no need to process failed hosts or hosts not in this play
return
for filename in self.vars_files:
# TODO: maybe have to template the path here...
if type(filename) == list:
# loop over all filenames, loading the first one, and failing if none found
found = False
sequence = []
for real_filename in filename:
filename2 = utils.template(real_filename, self.playbook.SETUP_CACHE[host])
filename2 = utils.template(filename2, self.vars)
filename2 = utils.path_dwim(self.playbook.basedir, filename2)
sequence.append(filename2)
if os.path.exists(filename2):
found = True
data = utils.parse_yaml_from_file(filename2)
self.playbook.SETUP_CACHE[host].update(data)
self.playbook.callbacks.on_import_for_host(host, filename2)
break
else:
self.playbook.callbacks.on_not_import_for_host(host, filename2)
if not found:
raise errors.AnsibleError(
"%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
)
else:
fpath = utils.path_dwim(self.playbook.basedir, utils.template(filename, self.vars))
new_vars = utils.parse_yaml_from_file(fpath)
self.playbook.SETUP_CACHE[host].update(new_vars)
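
As a quick illustration of the include handling in _load_tasks above, a standalone sketch (not part of the diff); the include line is a hypothetical example.

import shlex

# a task written as:  include: wordpress.yml user=timmy db=mydb
include_line = 'wordpress.yml user=timmy db=mydb'

tokens = shlex.split(include_line)
include_file = tokens[0]          # 'wordpress.yml'
task_vars = {}
for t in tokens[1:]:
    (k, v) = t.split('=', 1)
    task_vars[k] = v

print(include_file)
print(task_vars)                  # user/db end up as per-include variables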

View file

@@ -0,0 +1,69 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
#import ansible.inventory
#import ansible.runner
#import ansible.constants as C
#from ansible import utils
#from ansible import errors
class Task(object):
__slots__ = [
'name', 'action', 'only_if', 'async_seconds', 'async_poll_interval',
'notify', 'module_name', 'module_args', 'module_vars', 'play', 'notified_by',
]
def __init__(self, play, ds):
self.play = play
# FIXME: error handling on invalid fields
# action...
self.name = ds.get('name', None)
self.action = ds.get('action', '')
self.notified_by = []
if self.name is None:
self.name = self.action
self.only_if = ds.get('only_if', 'True')
self.async_seconds = int(ds.get('async', 0)) # not async by default
self.async_poll_interval = int(ds.get('poll', 10)) # default poll = 10 seconds
self.notify = ds.get('notify', [])
if isinstance(self.notify, basestring):
self.notify = [ self.notify ]
tokens = self.action.split(None, 1)
if len(tokens) < 1:
# FIXME: better error handling
raise Exception("invalid action in task: %s" % ds)
self.module_name = tokens[0]
self.module_args = ''
if len(tokens) > 1:
self.module_args = tokens[1]
# include task specific vars
self.module_vars = ds.get('vars', {})
if 'first_available_file' in ds:
self.module_vars['first_available_file'] = ds.get('first_available_file')
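
A short standalone sketch (not part of the diff) of the action parsing done in Task.__init__ above; the action string is a hypothetical example.

action = 'yum pkg=httpd state=installed'   # hypothetical task action

tokens = action.split(None, 1)             # split on the first run of whitespace only
module_name = tokens[0]                    # 'yum'
module_args = tokens[1] if len(tokens) > 1 else ''

print(module_name)
print(module_args)                         # 'pkg=httpd state=installed'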

View file

@@ -254,7 +254,7 @@ def varReplace(raw, vars):
return ''.join(done)
def template(text, vars, setup_cache, no_engine=True):
def template(text, vars, setup_cache=None, no_engine=True):
''' run a text buffer through the templating engine '''
vars = vars.copy()
vars['hostvars'] = setup_cache
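
The hunk above only makes setup_cache optional in template(). For readers unfamiliar with the helper, a toy sketch of the kind of $variable substitution varReplace performs -- an assumption for illustration, not ansible's actual implementation.

import re

def toy_var_replace(raw, vars):
    # replace $name tokens from the vars dict; leave unknown names untouched
    def lookup(match):
        name = match.group(1)
        return str(vars.get(name, match.group(0)))
    return re.sub(r'\$(\w+)', lookup, raw)

print(toy_var_replace('http port is $http_port', {'http_port': 80}))
# -> http port is 80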