Merge pull request #10618 from jder/force-handlers

Fix --force-handlers, and allow it in plays and ansible.cfg
Brian Coca 2015-04-13 10:16:45 -04:00
commit 67512aeeb6
11 changed files with 123 additions and 12 deletions


@@ -97,7 +97,8 @@ def main(args):
         help="one-step-at-a-time: confirm each task before running")
     parser.add_option('--start-at-task', dest='start_at',
         help="start the playbook at the task matching this name")
-    parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
+    parser.add_option('--force-handlers', dest='force_handlers',
+        default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
         help="run handlers even if a task fails")
     parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
         help="clear the fact cache")


@@ -252,6 +252,20 @@ This options forces color mode even when running without a TTY::

     force_color = 1

+.. _force_handlers:
+
+force_handlers
+==============
+
+.. versionadded:: 1.9.1
+
+This option causes notified handlers to run on a host even if a failure occurs on that host::
+
+    force_handlers = True
+
+The default is False, meaning that handlers will not run if a failure has occurred on a host.
+This can also be set per play or on the command line. See :doc:`_handlers_and_failure` for more details.
+
 .. _forks:

 forks


@@ -29,6 +29,26 @@ write a task that looks like this::

 Note that the above system only governs the failure of the particular task, so if you have an undefined
 variable used, it will still raise an error that users will need to address.

+.. _handlers_and_failure:
+
+Handlers and Failure
+````````````````````
+
+.. versionadded:: 1.9.1
+
+When a task fails on a host, handlers which were previously notified
+will *not* be run on that host. This can lead to cases where an unrelated failure
+can leave a host in an unexpected state. For example, a task could update
+a configuration file and notify a handler to restart some service. If a
+task later on in the same play fails, the service will not be restarted despite
+the configuration change.
+
+You can change this behavior with the ``--force-handlers`` command-line option,
+or by including ``force_handlers: True`` in a play, or ``force_handlers = True``
+in ansible.cfg. When handlers are forced, they will run when notified even
+if a task fails on that host. (Note that certain errors could still prevent
+the handler from running, such as a host becoming unreachable.)
+
 .. _controlling_what_defines_failure:

 Controlling What Defines Failure


@@ -173,6 +173,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
 DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
 COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
 DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
+
 RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
 RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
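Like the neighboring constants, this resolves the setting from the ``ANSIBLE_FORCE_HANDLERS`` environment variable first, then the ``force_handlers`` key in ansible.cfg, then the hard default of ``False``, coerced to a boolean. A rough sketch of that lookup order (a simplified stand-in, not the real ``get_config`` implementation)::

    import os

    def get_config_sketch(parser, section, key, env_var, default, boolean=False):
        # Approximation of the resolution order: environment variable
        # wins, then the ini file, then the hard-coded default.
        value = os.environ.get(env_var)
        if value is None and parser is not None and parser.has_option(section, key):
            value = parser.get(section, key)
        if value is None:
            return default
        if boolean:
            return str(value).strip().lower() in ('1', 'true', 'yes', 'on')
        return value

    # ANSIBLE_FORCE_HANDLERS=true in the environment overrides ansible.cfg,
    # which is what the integration tests below rely on.
    os.environ['ANSIBLE_FORCE_HANDLERS'] = 'true'
    assert get_config_sketch(None, 'defaults', 'force_handlers',
                             'ANSIBLE_FORCE_HANDLERS', False, boolean=True) is True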


@@ -375,17 +375,17 @@ class PlayBook(object):

     # *****************************************************

-    def _trim_unavailable_hosts(self, hostlist=[]):
+    def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
         ''' returns a list of hosts that haven't failed and aren't dark '''

-        return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark)]
+        return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]

     # *****************************************************

-    def _run_task_internal(self, task):
+    def _run_task_internal(self, task, include_failed=False):
         ''' run a particular module step in a playbook '''

-        hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts))
+        hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
         self.inventory.restrict_to(hosts)

         runner = ansible.runner.Runner(
@@ -493,7 +493,8 @@ class PlayBook(object):
             task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)

         # load up an appropriate ansible runner to run the task in parallel
-        results = self._run_task_internal(task)
+        include_failed = is_handler and play.force_handlers
+        results = self._run_task_internal(task, include_failed=include_failed)

         # if no hosts are matched, carry on
         hosts_remaining = True
@@ -811,7 +812,7 @@ class PlayBook(object):

                     # if no hosts remain, drop out
                     if not host_list:
-                        if self.force_handlers:
+                        if play.force_handlers:
                             task_errors = True
                             break
                         else:
@@ -821,7 +822,7 @@ class PlayBook(object):
             # lift restrictions after each play finishes
             self.inventory.lift_also_restriction()

-            if task_errors and not self.force_handlers:
+            if task_errors and not play.force_handlers:
                 # if there were failed tasks and handler execution
                 # is not forced, quit the play with an error
                 return False
@@ -856,7 +857,7 @@ class PlayBook(object):
                 play.max_fail_pct = 0
             if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
                 host_list = None

-            if not host_list and not self.force_handlers:
+            if not host_list and not play.force_handlers:
                 self.callbacks.on_no_hosts_remaining()
                 return False


@@ -34,9 +34,10 @@ class Play(object):

     _pb_common = [
         'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
-        'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts',
-        'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo',
-        'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password',
+        'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
+        'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
+        'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
+        'vault_password',
     ]

     __slots__ = _pb_common + [
@@ -153,6 +154,7 @@ class Play(object):
         self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
         self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
         self.no_log = utils.boolean(ds.get('no_log', 'false'))
+        self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))

         # Fail out if user specifies conflicting privelege escalations
         if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
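This line is what lets a play's explicit ``force_handlers`` win over both ansible.cfg and the command line, as the ``force_false_in_play`` Makefile test below verifies: a key present in the play's datastructure (even ``False``) is used as-is, and the playbook-wide value is only a fallback when the key is absent. A minimal sketch of that resolution, with a stand-in for ``ansible.utils.boolean``::

    def boolean_sketch(value):
        # Stand-in for ansible.utils.boolean: tolerant truthy-string parsing.
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on', 'y', 't')

    def resolve_force_handlers(play_ds, playbook_default):
        # dict.get with a fallback: a key present in the play (even False)
        # overrides the playbook-wide / command-line value.
        return boolean_sketch(play_ds.get('force_handlers', playbook_default))

    assert resolve_force_handlers({}, playbook_default=True) is True
    assert resolve_force_handlers({'force_handlers': False}, playbook_default=True) is False
    assert resolve_force_handlers({'force_handlers': True}, playbook_default=False) is True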


@@ -56,6 +56,20 @@ test_group_by:

 test_handlers:
 	ansible-playbook test_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
+	# Not forcing, should only run on successful host
+	[ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
+	# Forcing from command line
+	[ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+	# Forcing from command line, should only run later tasks on unfailed hosts
+	[ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_TASK_. | sort | uniq | xargs)" = "CALLED_TASK_B CALLED_TASK_D CALLED_TASK_E" ]
+	# Forcing from command line, should call handlers even if all hosts fail
+	[ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v -e fail_all=yes $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+	# Forcing from ansible.cfg
+	[ "$$(ANSIBLE_FORCE_HANDLERS=true ansible-playbook --tags normal test_force_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+	# Forcing true in play
+	[ "$$(ansible-playbook test_force_handlers.yml --tags force_true_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+	# Forcing false in play, which overrides command line
+	[ "$$(ansible-playbook test_force_handlers.yml --force-handlers --tags force_false_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]

 test_hash:
 	ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}'


@@ -0,0 +1,2 @@
+- name: echoing handler
+  command: echo CALLED_HANDLER_{{ inventory_hostname }}


@@ -0,0 +1,26 @@
+---
+
+# We notify for A and B, and hosts B and C fail.
+# When forcing, we expect A and B to run handlers
+# When not forcing, we expect only B to run handlers
+
+- name: notify the handler for host A and B
+  shell: echo
+  notify:
+    - echoing handler
+  when: inventory_hostname == 'A' or inventory_hostname == 'B'
+
+- name: fail task for all
+  fail: msg="Fail All"
+  when: fail_all is defined and fail_all
+
+- name: fail task for A
+  fail: msg="Fail A"
+  when: inventory_hostname == 'A'
+
+- name: fail task for C
+  fail: msg="Fail C"
+  when: inventory_hostname == 'C'
+
+- name: echo after A and C have failed
+  command: echo CALLED_TASK_{{ inventory_hostname }}


@@ -0,0 +1,28 @@
+---
+
+- name: test force handlers (default)
+  tags: normal
+  hosts: testgroup
+  gather_facts: False
+  connection: local
+  roles:
+    - { role: test_force_handlers }
+
+- name: test force handlers (set to true)
+  tags: force_true_in_play
+  hosts: testgroup
+  gather_facts: False
+  connection: local
+  force_handlers: True
+  roles:
+    - { role: test_force_handlers }
+
+- name: test force handlers (set to false)
+  tags: force_false_in_play
+  hosts: testgroup
+  gather_facts: False
+  connection: local
+  force_handlers: False
+  roles:
+    - { role: test_force_handlers }


@@ -47,6 +47,7 @@ class FakePlayBook(object):
         self.transport = None
         self.only_tags = None
         self.skip_tags = None
+        self.force_handlers = None
         self.VARS_CACHE = {}
         self.SETUP_CACHE = {}
         self.inventory = FakeInventory()
self.inventory = FakeInventory() self.inventory = FakeInventory()