commit
134873bada
17 changed files with 132 additions and 77 deletions
|
@ -55,3 +55,12 @@
|
|||
# will be ignored, and 4 will be used. Accepts a comma separated list,
|
||||
# the first found wins.
|
||||
# access_ip_version = 4
|
||||
|
||||
# Environment Variable: RAX_CACHE_MAX_AGE
|
||||
# Default: 600
|
||||
#
|
||||
# A configuration that changes the behavior of the inventory cache.
|
||||
# Inventory listing performed before this value will be returned from
|
||||
# the cache instead of making a full request for all inventory. Setting
|
||||
# this value to 0 will force a full request.
|
||||
# cache_max_age = 600
|
|
@ -355,9 +355,12 @@ def get_cache_file_path(regions):
|
|||
|
||||
|
||||
def _list(regions, refresh_cache=True):
|
||||
cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
|
||||
'RAX_CACHE_MAX_AGE', 600))
|
||||
|
||||
if (not os.path.exists(get_cache_file_path(regions)) or
|
||||
refresh_cache or
|
||||
(time() - os.stat(get_cache_file_path(regions))[-1]) > 600):
|
||||
(time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
|
||||
# Cache file doesn't exist or older than 10m or refresh cache requested
|
||||
_list_into_cache(regions)
|
||||
|
||||
|
|
|
@ -78,6 +78,10 @@ class Inventory(object):
|
|||
self._restriction = None
|
||||
self._subset = None
|
||||
|
||||
# clear the cache here, which is only useful if more than
|
||||
# one Inventory objects are created when using the API directly
|
||||
self.clear_pattern_cache()
|
||||
|
||||
self.parse_inventory(host_list)
|
||||
|
||||
def serialize(self):
|
||||
|
|
|
@ -31,7 +31,7 @@ from ansible.errors import AnsibleError
|
|||
from ansible.inventory.host import Host
|
||||
from ansible.inventory.group import Group
|
||||
from ansible.module_utils.basic import json_dict_bytes_to_unicode
|
||||
from ansible.utils.unicode import to_str
|
||||
from ansible.utils.unicode import to_str, to_unicode
|
||||
|
||||
|
||||
class InventoryScript:
|
||||
|
@ -58,7 +58,13 @@ class InventoryScript:
|
|||
if sp.returncode != 0:
|
||||
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
|
||||
|
||||
self.data = stdout
|
||||
# make sure script output is unicode so that json loader will output
|
||||
# unicode strings itself
|
||||
try:
|
||||
self.data = to_unicode(stdout, errors="strict")
|
||||
except Exception as e:
|
||||
raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_str(self.filename), to_str(e)))
|
||||
|
||||
# see comment about _meta below
|
||||
self.host_vars_from_top = None
|
||||
self._parse(stderr)
|
||||
|
@ -78,8 +84,6 @@ class InventoryScript:
|
|||
sys.stderr.write(err + "\n")
|
||||
raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename)))
|
||||
|
||||
self.raw = json_dict_bytes_to_unicode(self.raw)
|
||||
|
||||
group = None
|
||||
for (group_name, data) in self.raw.items():
|
||||
|
||||
|
|
|
@ -546,11 +546,10 @@ class AnsibleModule(object):
|
|||
if no_log_object:
|
||||
self.no_log_values.update(return_values(no_log_object))
|
||||
|
||||
# check the locale as set by the current environment, and
|
||||
# reset to LANG=C if it's an invalid/unavailable locale
|
||||
# check the locale as set by the current environment, and reset to
|
||||
# a known valid (LANG=C) if it's an invalid/unavailable locale
|
||||
self._check_locale()
|
||||
|
||||
|
||||
self._check_arguments(check_invalid_arguments)
|
||||
|
||||
# check exclusive early
|
||||
|
@ -1094,7 +1093,6 @@ class AnsibleModule(object):
|
|||
# as it would be returned by locale.getdefaultlocale()
|
||||
locale.setlocale(locale.LC_ALL, '')
|
||||
except locale.Error:
|
||||
e = get_exception()
|
||||
# fallback to the 'C' locale, which may cause unicode
|
||||
# issues but is preferable to simply failing because
|
||||
# of an unknown locale
|
||||
|
@ -1757,25 +1755,29 @@ class AnsibleModule(object):
|
|||
# rename might not preserve context
|
||||
self.set_context_if_different(dest, context, False)
|
||||
|
||||
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
|
||||
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
|
||||
'''
|
||||
Execute a command, returns rc, stdout, and stderr.
|
||||
args is the command to run
|
||||
If args is a list, the command will be run with shell=False.
|
||||
If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
|
||||
If args is a string and use_unsafe_shell=True it run with shell=True.
|
||||
Other arguments:
|
||||
- check_rc (boolean) Whether to call fail_json in case of
|
||||
non zero RC. Default is False.
|
||||
- close_fds (boolean) See documentation for subprocess.Popen().
|
||||
Default is True.
|
||||
- executable (string) See documentation for subprocess.Popen().
|
||||
Default is None.
|
||||
- prompt_regex (string) A regex string (not a compiled regex) which
|
||||
can be used to detect prompts in the stdout
|
||||
which would otherwise cause the execution
|
||||
to hang (especially if no input data is
|
||||
specified)
|
||||
|
||||
:arg args: is the command to run
|
||||
* If args is a list, the command will be run with shell=False.
|
||||
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
|
||||
* If args is a string and use_unsafe_shell=True it runs with shell=True.
|
||||
:kw check_rc: Whether to call fail_json in case of non zero RC.
|
||||
Default False
|
||||
:kw close_fds: See documentation for subprocess.Popen(). Default True
|
||||
:kw executable: See documentation for subprocess.Popen(). Default None
|
||||
:kw data: If given, information to write to the stdin of the command
|
||||
:kw binary_data: If False, append a newline to the data. Default False
|
||||
:kw path_prefix: If given, additional path to find the command in.
|
||||
This adds to the PATH environment variable so helper commands in
|
||||
the same directory can also be found
|
||||
:kw cwd: If given, working directory to run the command inside
|
||||
:kw use_unsafe_shell: See `args` parameter. Default False
|
||||
:kw prompt_regex: Regex string (not a compiled regex) which can be
|
||||
used to detect prompts in the stdout which would otherwise cause
|
||||
the execution to hang (especially if no input data is specified)
|
||||
:kwarg environ_update: dictionary to *update* os.environ with
|
||||
'''
|
||||
|
||||
shell = False
|
||||
|
@ -1806,10 +1808,15 @@ class AnsibleModule(object):
|
|||
msg = None
|
||||
st_in = None
|
||||
|
||||
# Set a temporary env path if a prefix is passed
|
||||
env=os.environ
|
||||
# Manipulate the environ we'll send to the new process
|
||||
old_env_vals = {}
|
||||
if environ_update:
|
||||
for key, val in environ_update.items():
|
||||
old_env_vals[key] = os.environ.get(key, None)
|
||||
os.environ[key] = val
|
||||
if path_prefix:
|
||||
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
|
||||
old_env_vals['PATH'] = os.environ['PATH']
|
||||
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
|
||||
|
||||
# create a printable version of the command for use
|
||||
# in reporting later, which strips out things like
|
||||
|
@ -1851,11 +1858,10 @@ class AnsibleModule(object):
|
|||
close_fds=close_fds,
|
||||
stdin=st_in,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE
|
||||
stderr=subprocess.PIPE,
|
||||
env=os.environ,
|
||||
)
|
||||
|
||||
if path_prefix:
|
||||
kwargs['env'] = env
|
||||
if cwd and os.path.isdir(cwd):
|
||||
kwargs['cwd'] = cwd
|
||||
|
||||
|
@ -1934,6 +1940,13 @@ class AnsibleModule(object):
|
|||
except:
|
||||
self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
|
||||
|
||||
# Restore env settings
|
||||
for key, val in old_env_vals.items():
|
||||
if val is None:
|
||||
del os.environ[key]
|
||||
else:
|
||||
os.environ[key] = val
|
||||
|
||||
if rc != 0 and check_rc:
|
||||
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
|
||||
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
|
||||
|
|
|
@ -559,9 +559,11 @@ class Facts(object):
|
|||
# also other OSs other than linux might need to check across several possible candidates
|
||||
|
||||
# try various forms of querying pid 1
|
||||
proc_1 = os.path.basename(get_file_content('/proc/1/comm'))
|
||||
proc_1 = get_file_content('/proc/1/comm')
|
||||
if proc_1 is None:
|
||||
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
|
||||
else:
|
||||
proc_1 = os.path.basename(proc_1)
|
||||
|
||||
if proc_1 == 'init' or proc_1.endswith('sh'):
|
||||
# many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
|
||||
|
@ -569,7 +571,7 @@ class Facts(object):
|
|||
|
||||
# if not init/None it should be an identifiable or custom init, so we are done!
|
||||
if proc_1 is not None:
|
||||
self.facts['service_mgr'] = proc_1
|
||||
self.facts['service_mgr'] = proc_1.strip()
|
||||
|
||||
# start with the easy ones
|
||||
elif self.facts['distribution'] == 'MacOSX':
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit ffea58ee86dbee20dc272c74cd5f8e02f6f317e6
|
||||
Subproject commit d7fac82f97c153af08dbea2b2ae9718b19abeb8a
|
|
@ -1 +1 @@
|
|||
Subproject commit e9450df878632531fae574b5eaf28bf0f7916948
|
||||
Subproject commit f798240f436a16a828f48759bbd176b6bccdfe75
|
|
@ -71,7 +71,7 @@ try:
|
|||
except ImportError:
|
||||
pass
|
||||
|
||||
from ansible.compat.six import PY3, byte2int
|
||||
from ansible.compat.six import PY3
|
||||
from ansible.utils.unicode import to_unicode, to_bytes
|
||||
|
||||
HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
|
||||
|
@ -236,22 +236,24 @@ class VaultEditor:
|
|||
"""
|
||||
|
||||
file_len = os.path.getsize(tmp_path)
|
||||
max_chunk_len = min(1024*1024*2, file_len)
|
||||
|
||||
passes = 3
|
||||
with open(tmp_path, "wb") as fh:
|
||||
for _ in range(passes):
|
||||
fh.seek(0, 0)
|
||||
# get a random chunk of data, each pass with other length
|
||||
chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
|
||||
data = os.urandom(chunk_len)
|
||||
if file_len > 0: # avoid work when file was empty
|
||||
max_chunk_len = min(1024*1024*2, file_len)
|
||||
|
||||
for _ in range(0, file_len // chunk_len):
|
||||
fh.write(data)
|
||||
fh.write(data[:file_len % chunk_len])
|
||||
passes = 3
|
||||
with open(tmp_path, "wb") as fh:
|
||||
for _ in range(passes):
|
||||
fh.seek(0, 0)
|
||||
# get a random chunk of data, each pass with other length
|
||||
chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
|
||||
data = os.urandom(chunk_len)
|
||||
|
||||
assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy
|
||||
os.fsync(fh)
|
||||
for _ in range(0, file_len // chunk_len):
|
||||
fh.write(data)
|
||||
fh.write(data[:file_len % chunk_len])
|
||||
|
||||
assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy
|
||||
os.fsync(fh)
|
||||
|
||||
|
||||
def _shred_file(self, tmp_path):
|
||||
|
@ -273,7 +275,7 @@ class VaultEditor:
|
|||
|
||||
try:
|
||||
r = call(['shred', tmp_path])
|
||||
except OSError as e:
|
||||
except OSError:
|
||||
# shred is not available on this system, or some other error occurred.
|
||||
r = 1
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ import json
|
|||
import os
|
||||
import pipes
|
||||
import random
|
||||
import re
|
||||
import stat
|
||||
import tempfile
|
||||
import time
|
||||
|
@ -356,6 +357,14 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
|
||||
return data[idx:]
|
||||
|
||||
def _strip_success_message(self, data):
|
||||
'''
|
||||
Removes the BECOME-SUCCESS message from the data.
|
||||
'''
|
||||
if data.strip().startswith('BECOME-SUCCESS-'):
|
||||
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
|
||||
return data
|
||||
|
||||
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
|
||||
'''
|
||||
Transfer and run a module along with its arguments.
|
||||
|
@ -481,8 +490,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
|
||||
return data
|
||||
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
|
||||
executable=None, encoding_errors='replace'):
|
||||
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):
|
||||
'''
|
||||
This is the function which executes the low level shell command, which
|
||||
may be commands to create/remove directories for temporary files, or to
|
||||
|
@ -498,7 +506,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
|
|||
'''
|
||||
|
||||
if executable is not None:
|
||||
cmd = executable + ' -c ' + cmd
|
||||
cmd = executable + ' -c ' + pipes.quote(cmd)
|
||||
|
||||
display.debug("_low_level_execute_command(): starting")
|
||||
if not cmd:
|
||||
|
|
|
@ -75,4 +75,8 @@ class ActionModule(ActionBase):
|
|||
|
||||
result['changed'] = True
|
||||
|
||||
# be sure to strip out the BECOME-SUCCESS message, which may
|
||||
# be there depending on the output of the module
|
||||
result['stdout'] = self._strip_success_message(result.get('stdout', ''))
|
||||
|
||||
return result
|
||||
|
|
|
@ -19,8 +19,6 @@ __metaclass__ = type
|
|||
|
||||
from ansible.plugins.action import ActionBase
|
||||
|
||||
import re
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
|
||||
TRANSFERS_FILES = False
|
||||
|
@ -42,7 +40,6 @@ class ActionModule(ActionBase):
|
|||
# for some modules (script, raw), the sudo success key
|
||||
# may leak into the stdout due to the way the sudo/su
|
||||
# command is constructed, so we filter that out here
|
||||
if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
|
||||
result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
|
||||
result['stdout'] = self._strip_success_message(result.get('stdout', ''))
|
||||
|
||||
return result
|
||||
|
|
|
@ -26,10 +26,15 @@ class LookupModule(LookupBase):
|
|||
|
||||
def get_hosts(self, variables, pattern):
|
||||
hosts = []
|
||||
if pattern in variables['groups']:
|
||||
hosts = variables['groups'][pattern]
|
||||
elif pattern in variables['groups']['all']:
|
||||
hosts = [pattern]
|
||||
if pattern[0] in ('!','&'):
|
||||
obj = pattern[1:]
|
||||
else:
|
||||
obj = pattern
|
||||
|
||||
if obj in variables['groups']:
|
||||
hosts = variables['groups'][obj]
|
||||
elif obj in variables['groups']['all']:
|
||||
hosts = [obj]
|
||||
return hosts
|
||||
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
|
|
|
@ -492,7 +492,7 @@ class StrategyBase:
|
|||
tags = [ tags ]
|
||||
if len(tags) > 0:
|
||||
if len(b._task_include.tags) > 0:
|
||||
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)",
|
||||
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
|
||||
obj=included_file._task._ds)
|
||||
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
|
||||
b._task_include.tags = tags
|
||||
|
|
|
@ -194,8 +194,6 @@ class StrategyModule(StrategyBase):
|
|||
|
||||
try:
|
||||
action = action_loader.get(task.action, class_only=True)
|
||||
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
|
||||
run_once = True
|
||||
except KeyError:
|
||||
# we don't care here, because the action may simply not have a
|
||||
# corresponding action plugin
|
||||
|
@ -227,6 +225,8 @@ class StrategyModule(StrategyBase):
|
|||
templar = Templar(loader=self._loader, variables=task_vars)
|
||||
display.debug("done getting variables")
|
||||
|
||||
run_once = templar.template(task.run_once)
|
||||
|
||||
if not callback_sent:
|
||||
display.debug("sending task start callback, copying the task so we can template it temporarily")
|
||||
saved_name = task.name
|
||||
|
@ -249,7 +249,7 @@ class StrategyModule(StrategyBase):
|
|||
self._queue_task(host, task, task_vars, play_context)
|
||||
|
||||
# if we're bypassing the host loop, break out now
|
||||
if run_once:
|
||||
if run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
|
||||
break
|
||||
|
||||
results += self._process_pending_results(iterator, one_pass=True)
|
||||
|
@ -342,13 +342,20 @@ class StrategyModule(StrategyBase):
|
|||
display.debug("results queue empty")
|
||||
|
||||
display.debug("checking for any_errors_fatal")
|
||||
had_failure = include_failure
|
||||
failed_hosts = []
|
||||
for res in results:
|
||||
if res.is_failed() or res.is_unreachable():
|
||||
had_failure = True
|
||||
break
|
||||
if task and task.any_errors_fatal and had_failure:
|
||||
return False
|
||||
failed_hosts.append(res._host.name)
|
||||
|
||||
# if any_errors_fatal and we had an error, mark all hosts as failed
|
||||
if task and task.any_errors_fatal and len(failed_hosts) > 0:
|
||||
for host in hosts_left:
|
||||
# don't double-mark hosts, or the iterator will potentially
|
||||
# fail them out of the rescue/always states
|
||||
if host.name not in failed_hosts:
|
||||
self._tqm._failed_hosts[host.name] = True
|
||||
iterator.mark_host_failed(host)
|
||||
display.debug("done checking for any_errors_fatal")
|
||||
|
||||
except (IOError, EOFError) as e:
|
||||
display.debug("got IOError/EOFError in task loop: %s" % e)
|
||||
|
|
|
@ -39,6 +39,7 @@ class OpenStringIO(StringIO):
|
|||
def close(self):
|
||||
pass
|
||||
|
||||
|
||||
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
|
||||
class TestAnsibleModuleRunCommand(unittest.TestCase):
|
||||
|
||||
|
@ -111,10 +112,6 @@ class TestAnsibleModuleRunCommand(unittest.TestCase):
|
|||
self.assertEqual(args, ('ls a " b" "c "', ))
|
||||
self.assertEqual(kwargs['shell'], True)
|
||||
|
||||
def test_path_prefix(self):
|
||||
self.module.run_command('foo', path_prefix='/opt/bin')
|
||||
self.assertEqual('/opt/bin', self.os.environ['PATH'].split(':')[0])
|
||||
|
||||
def test_cwd(self):
|
||||
self.os.getcwd.return_value = '/old'
|
||||
self.module.run_command('/bin/ls', cwd='/new')
|
||||
|
|
|
@ -49,7 +49,7 @@ class TestActionBase(unittest.TestCase):
|
|||
|
||||
play_context.remote_user = 'apo'
|
||||
action_base._low_level_execute_command('ECHO', sudoable=True)
|
||||
play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None)
|
||||
play_context.make_become_cmd.assert_called_once_with("/bin/sh -c ECHO", executable='/bin/sh')
|
||||
|
||||
play_context.make_become_cmd.reset_mock()
|
||||
|
||||
|
@ -58,6 +58,6 @@ class TestActionBase(unittest.TestCase):
|
|||
try:
|
||||
play_context.remote_user = 'root'
|
||||
action_base._low_level_execute_command('ECHO SAME', sudoable=True)
|
||||
play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None)
|
||||
play_context.make_become_cmd.assert_called_once_with("/bin/sh -c 'ECHO SAME'", executable='/bin/sh')
|
||||
finally:
|
||||
C.BECOME_ALLOW_SAME_USER = become_allow_same_user
|
||||
|
|
Loading…
Reference in a new issue