whitespace cleanup + remove the deprecated YAML inventory parser (the migration script lives in examples/scripts, and a deprecation warning was already added in the 0.6 release)
parent 0810f26095
commit faed4b5a33
36 changed files with 306 additions and 450 deletions
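The removed YAML inventory format maps onto the INI format that InventoryParser continues to support. As a rough illustration of what the migration accomplishes — this is a simplified sketch, not the yaml_to_ini.py script itself, and the entry structure is assumed from the deleted InventoryParserYaml further below (plain hostnames plus {'group': ..., 'hosts': [...], 'vars': {...}} entries):

# Illustrative only: convert a YAML-style inventory structure to INI text.
def yaml_inventory_to_ini(entries):
    lines = [e for e in entries if isinstance(e, str)]   # ungrouped hosts first
    for entry in entries:
        if isinstance(entry, dict) and 'group' in entry:
            lines.append("")
            lines.append("[%s]" % entry['group'])
            lines.extend(entry.get('hosts', []))
            group_vars = entry.get('vars', {})
            if group_vars:
                lines.append("")
                lines.append("[%s:vars]" % entry['group'])
                lines.extend("%s=%s" % (k, v) for k, v in group_vars.items())
    return "\n".join(lines) + "\n"

print(yaml_inventory_to_ini([
    "standalone.example.com",
    {"group": "webservers", "hosts": ["alpha", "beta"], "vars": {"http_port": 80}},
]))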
bin/ansible | 32
@@ -33,35 +33,35 @@ from ansible import inventory

class Cli(object):
    ''' code behind bin/ansible '''

    # ----------------------------------------------

    def __init__(self):
        self.stats = callbacks.AggregateStats()
        self.callbacks = callbacks.CliRunnerCallbacks()

    # ----------------------------------------------

    def parse(self):
        ''' create an options parser for bin/ansible '''

        parser = utils.base_parser(constants=C, runas_opts=True, async_opts=True,
            output_opts=True, connect_opts=True, usage='%prog <host-pattern> [options]')
        parser.add_option('-a', '--args', dest='module_args',
            help="module arguments", default=C.DEFAULT_MODULE_ARGS)
        parser.add_option('-m', '--module-name', dest='module_name',
            help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
            default=C.DEFAULT_MODULE_NAME)
        options, args = parser.parse_args()
        self.callbacks.options = options

        if len(args) == 0 or len(args) > 1:
            parser.print_help()
            sys.exit(1)
        return (options, args)

    # ----------------------------------------------

    def run(self, options, args):
        ''' use Runner lib to do SSH things '''

@@ -72,7 +72,7 @@ class Cli(object):
        if len(hosts) == 0:
            print >>sys.stderr, "No hosts matched"
            sys.exit(1)

        sshpass = None
        sudopass = None
        if options.ask_pass:

@@ -90,11 +90,11 @@ class Cli(object):
            module_name=options.module_name, module_path=options.module_path,
            module_args=options.module_args,
            remote_user=options.remote_user, remote_pass=sshpass,
            inventory=inventory_manager, timeout=options.timeout,
            private_key_file=options.private_key_file,
            forks=options.forks,
            pattern=pattern,
            callbacks=self.callbacks, sudo=options.sudo,
            sudo_pass=sudopass,sudo_user=options.sudo_user,
            transport=options.connection, verbose=options.verbose
        )

@@ -108,7 +108,7 @@ class Cli(object):
        return (runner, results)

    # ----------------------------------------------

    def poll_while_needed(self, poller, options):
        ''' summarize results from Runner '''
@@ -87,15 +87,15 @@ def main(args):
    pb = ansible.playbook.PlayBook(
        playbook=playbook,
        module_path=options.module_path,
        host_list=options.inventory,
        forks=options.forks,
        verbose=options.verbose,
        remote_user=options.remote_user,
        remote_pass=sshpass,
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        stats=stats,
        timeout=options.timeout,
        transport=options.connection,
        sudo=options.sudo,
        sudo_user=options.sudo_user,
@@ -5,7 +5,7 @@
# example playbook to bootstrap this script in the examples/ dir which
# installs ansible and sets it up to run on cron.
#
# usage:
#   ansible-pull -d /var/ansible/local -U http://wherever/content.git -C production
#
# the git repo must contain a playbook named 'local.yml'
@@ -29,8 +29,8 @@ elif os.path.exists("/usr/games/cowsay"):
    cowsay = "/usr/games/cowsay"

class AggregateStats(object):
    ''' holds stats about per-host activity during playbook runs '''

    def __init__(self):

        self.processed = {}

@@ -49,7 +49,7 @@ class AggregateStats(object):

    def compute(self, runner_results, setup=False, poll=False):
        ''' walk through all results and increment stats '''

        for (host, value) in runner_results.get('contacted', {}).iteritems():
            if ('failed' in value and bool(value['failed'])) or ('rc' in value and value['rc'] != 0):
                self._increment('failures', host)

@@ -65,7 +65,7 @@ class AggregateStats(object):

        for (host, value) in runner_results.get('dark', {}).iteritems():
            self._increment('dark', host)

    def summarize(self, host):
        ''' return information about a particular host '''

@@ -92,10 +92,10 @@ def regular_generic_msg(hostname, result, oneline, caption):
def banner(msg):

    if cowsay != None:
        cmd = subprocess.Popen("%s -W 60 \"%s\"" % (cowsay, msg),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        (out, err) = cmd.communicate()
        return "%s\n" % out
    else:
        return "\n%s ********************* " % msg

@@ -182,7 +182,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
    def __init__(self):
        # set by /usr/bin/ansible later
        self.options = None
        self._async_notified = {}

    def on_failed(self, host, res, ignore_errors=False):

@@ -192,7 +192,7 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
    def on_ok(self, host, res):
        self._on_any(host,res)

    def on_unreachable(self, host, res):
        if type(res) == dict:

@@ -200,17 +200,17 @@ class CliRunnerCallbacks(DefaultRunnerCallbacks):
            print "%s | FAILED => %s" % (host, res)
        if self.options.tree:
            utils.write_tree_file(
                self.options.tree, host,
                utils.jsonify(dict(failed=True, msg=res),format=True)
            )

    def on_skipped(self, host):
        pass

    def on_error(self, host, err):
        print >>sys.stderr, "err: [%s] => %s\n" % (host, err)

    def on_no_hosts(self):
        print >>sys.stderr, "no hosts matched\n"

@@ -277,11 +277,11 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
        item = host_result.get('item', None)

        # show verbose output for non-setup module results if --verbose is used
        msg = ''
        if not self.verbose or host_result.get("verbose_override",None) is not None:
            if item:
                msg = "ok: [%s] => (item=%s)" % (host,item)
            else:
                if 'ansible_job_id' not in host_result or 'finished' in host_result:
                    msg = "ok: [%s]" % (host)
        else:

@@ -347,7 +347,7 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):

class PlaybookCallbacks(object):
    ''' playbook.py callbacks used by /usr/bin/ansible-playbook '''

    def __init__(self, verbose=False):
        self.verbose = verbose

@@ -376,11 +376,11 @@ class PlaybookCallbacks(object):
        if private:
            return getpass.getpass(msg)
        return raw_input(msg)

    def on_setup(self):
        print banner("GATHERING FACTS")

    def on_import_for_host(self, host, imported_file):
        msg = "%s: importing %s" % (host, imported_file)
@@ -20,7 +20,7 @@ class AnsibleError(Exception):

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
@@ -23,7 +23,6 @@ import os
import subprocess
import ansible.constants as C
from ansible.inventory.ini import InventoryParser
from ansible.inventory.yaml import InventoryParserYaml
from ansible.inventory.script import InventoryScript
from ansible.inventory.group import Group
from ansible.inventory.host import Host

@@ -31,12 +30,12 @@ from ansible import errors
from ansible import utils

class Inventory(object):
    """
    Host inventory for ansible.
    """

    __slots__ = [ 'host_list', 'groups', '_restriction', '_is_script',
                  'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache' ]

    def __init__(self, host_list=C.DEFAULT_HOST_LIST):

@@ -46,14 +45,14 @@ class Inventory(object):

        # caching to avoid repeated calculations, particularly with
        # external inventory scripts.

        self._vars_per_host = {}
        self._vars_per_group = {}
        self._hosts_cache = {}

        # the inventory object holds a list of groups
        self.groups = []

        # a list of host(names) to contain current inquiries to
        self._restriction = None

@@ -83,10 +82,9 @@ class Inventory(object):
                if not data.startswith("---"):
                    self.parser = InventoryParser(filename=host_list)
                    self.groups = self.parser.groups.values()
                else:
                    self.parser = InventoryParserYaml(filename=host_list)
                    self.groups = self.parser.groups.values()
                else:
                    raise errors.AnsibleError("YAML inventory support is deprecated in 0.6 and removed in 0.7, see the migration script in examples/scripts in the git checkout")

    def _match(self, str, pattern_str):
        return fnmatch.fnmatch(str, pattern_str)

@@ -107,7 +105,7 @@ class Inventory(object):
            for host in group.get_hosts():
                if self._match(group.name, pat) or pat == 'all' or self._match(host.name, pat):
                    # must test explicitly for None because [] means no hosts allowed
                    if self._restriction==None or host.name in self._restriction:
                        if inverted:
                            if host.name in hosts:
                                del hosts[host.name]

@@ -135,7 +133,7 @@ class Inventory(object):
            if group.name == groupname:
                return group
        return None

    def get_group_variables(self, groupname):
        if groupname not in self._vars_per_group:
            self._vars_per_group[groupname] = self._get_group_variables(groupname)

@@ -157,8 +155,8 @@ class Inventory(object):
        if self._is_script:
            host = self.get_host(hostname)
            cmd = subprocess.Popen(
                [self.host_list,"--host",hostname],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            (out, err) = cmd.communicate()

@@ -184,7 +182,7 @@ class Inventory(object):
        return [ h.name for h in self.get_hosts(pattern) ]

    def list_groups(self):
        return [ g.name for g in self.groups ]

    def get_restriction(self):
        return self._restriction
@@ -40,15 +40,15 @@ def detect_range(line = None):

    Returnes True if the given line contains a pattern, else False.
    '''
    if (not line.startswith("[") and
        line.find("[") != -1 and
        line.find(":") != -1 and
        line.find("]") != -1 and
        line.index("[") < line.index(":") < line.index("]")):
        return True
    else:
        return False

def expand_hostname_range(line = None):
    '''
    A helper function that expands a given line that contains a pattern

@@ -64,11 +64,11 @@ def expand_hostname_range(line = None):
    all_hosts = []
    if line:
        # A hostname such as db[1:6]-node is considered to consists
        # three parts:
        # head: 'db'
        # nrange: [1:6]; range() is a built-in. Can't use the name
        # tail: '-node'

        (head, nrange, tail) = line.replace('[','|').replace(']','|').split('|')
        bounds = nrange.split(":")
        if len(bounds) != 2:

@@ -85,7 +85,7 @@ def expand_hostname_range(line = None):
        rlen = None
    if rlen > 1 and rlen != len(end):
        raise errors.AnsibleError("host range format incorrectly specified!")

    for _ in range(int(beg), int(end)+1):
        if rlen:
            rseq = str(_).zfill(rlen) # range sequence

@@ -93,5 +93,5 @@ def expand_hostname_range(line = None):
            rseq = str(_)
        hname = ''.join((head, rseq, tail))
        all_hosts.append(hname)

    return all_hosts
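The detect_range / expand_hostname_range helpers above turn a pattern such as db[1:6]-node into individual hostnames. A standalone sketch of that expansion, simplified from the hunks above (zero-padding is assumed to apply only when both bounds have the same multi-digit width; it is not the module's own code):

# Simplified re-statement of the hostname-range expansion described above.
def expand_range(line):
    head, nrange, tail = line.replace('[', '|').replace(']', '|').split('|')
    beg, end = nrange.split(':')
    width = len(beg) if len(beg) > 1 and len(beg) == len(end) else 0
    return [
        "%s%s%s" % (head, str(i).zfill(width) if width else str(i), tail)
        for i in range(int(beg), int(end) + 1)
    ]

print(expand_range("db[1:6]-node"))   # ['db1-node', 'db2-node', ..., 'db6-node']
print(expand_range("web[01:03]"))     # ['web01', 'web02', 'web03']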
@@ -52,7 +52,7 @@ class Group(object):
        for kid in self.child_groups:
            hosts.extend(kid.get_hosts())
        hosts.extend(self.hosts)
        return hosts

    def get_variables(self):
@@ -49,7 +49,7 @@ class Host(object):
            groups[g.name] = g
            ancestors = g.get_ancestors()
            for a in ancestors:
                groups[a.name] = a
        return groups.values()

    def get_variables(self):
@@ -30,7 +30,7 @@ from ansible import errors
from ansible import utils

class InventoryParser(object):
    """
    Host inventory for ansible.
    """

@@ -41,20 +41,20 @@ class InventoryParser(object):
        self.groups = {}
        self.hosts = {}
        self._parse()

    def _parse(self):

        self._parse_base_groups()
        self._parse_group_children()
        self._parse_group_variables()
        return self.groups

    # [webservers]
    # alpha
    # beta:2345
    # gamma sudo=True user=root
    # delta asdf=jkl favcolor=red

    def _parse_base_groups(self):
        # FIXME: refactor

@@ -93,7 +93,7 @@ class InventoryParser(object):
                    tokens2 = hostname.rsplit(":", 1)
                    hostname = tokens2[0]
                    port = tokens2[1]

                host = None
                _all_hosts = []
                if hostname in self.hosts:
@@ -52,5 +52,5 @@ class InventoryScript(object):
                # FIXME: hack shouldn't be needed
                all.add_host(host)
            all.add_child_group(group)
        return groups
@@ -1,142 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible import errors
from ansible import utils
import sys

class InventoryParserYaml(object):
    ''' Host inventory parser for ansible '''

    __slots__ = [ '_hosts', 'groups' ]

    def __init__(self, filename=C.DEFAULT_HOST_LIST):

        sys.stderr.write("WARNING: YAML inventory files are deprecated in 0.6 and will be removed in 0.7, to migrate" +
            " download and run https://github.com/ansible/ansible/blob/devel/examples/scripts/yaml_to_ini.py\n")

        fh = open(filename)
        data = fh.read()
        fh.close()
        self._hosts = {}
        self._parse(data)

    def _make_host(self, hostname):

        if hostname in self._hosts:
            return self._hosts[hostname]
        else:
            host = Host(hostname)
            self._hosts[hostname] = host
            return host

    # see file 'test/yaml_hosts' for syntax

    def _parse(self, data):
        # FIXME: refactor into subfunctions

        all = Group('all')
        ungrouped = Group('ungrouped')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)
        grouped_hosts = []

        yaml = utils.parse_yaml(data)

        # first add all groups
        for item in yaml:
            if type(item) == dict and 'group' in item:
                group = Group(item['group'])

                for subresult in item.get('hosts',[]):

                    if type(subresult) in [ str, unicode ]:
                        host = self._make_host(subresult)
                        group.add_host(host)
                        grouped_hosts.append(host)
                    elif type(subresult) == dict:
                        host = self._make_host(subresult['host'])
                        vars = subresult.get('vars',{})
                        if type(vars) == list:
                            for subitem in vars:
                                for (k,v) in subitem.items():
                                    host.set_variable(k,v)
                        elif type(vars) == dict:
                            for (k,v) in subresult.get('vars',{}).items():
                                host.set_variable(k,v)
                        else:
                            raise errors.AnsibleError("unexpected type for variable")
                        group.add_host(host)
                        grouped_hosts.append(host)

                vars = item.get('vars',{})
                if type(vars) == dict:
                    for (k,v) in item.get('vars',{}).items():
                        group.set_variable(k,v)
                elif type(vars) == list:
                    for subitem in vars:
                        if type(subitem) != dict:
                            raise errors.AnsibleError("expected a dictionary")
                        for (k,v) in subitem.items():
                            group.set_variable(k,v)

                self.groups[group.name] = group
                all.add_child_group(group)

        # add host definitions
        for item in yaml:
            if type(item) in [ str, unicode ]:
                host = self._make_host(item)
                if host not in grouped_hosts:
                    ungrouped.add_host(host)

            elif type(item) == dict and 'host' in item:
                host = self._make_host(item['host'])

                vars = item.get('vars', {})
                if type(vars)==list:
                    varlist, vars = vars, {}
                    for subitem in varlist:
                        vars.update(subitem)
                for (k,v) in vars.items():
                    host.set_variable(k,v)

                groups = item.get('groups', {})
                if type(groups) in [ str, unicode ]:
                    groups = [ groups ]
                if type(groups)==list:
                    for subitem in groups:
                        if subitem in self.groups:
                            group = self.groups[subitem]
                        else:
                            group = Group(subitem)
                            self.groups[group.name] = group
                            all.add_child_group(group)
                        group.add_host(host)
                        grouped_hosts.append(host)

                if host not in grouped_hosts:
                    ungrouped.add_host(host)

        # make sure ungrouped.hosts is the complement of grouped_hosts
        ungrouped_hosts = [host for host in ungrouped.hosts if host not in grouped_hosts]
@@ -56,7 +56,7 @@ except ImportError:
class AnsibleModule(object):

    def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/slurp and others for examples
        '''

@@ -67,7 +67,7 @@ class AnsibleModule(object):

        self._legal_inputs = []
        self._handle_aliases()

        # this may be disabled where modules are going to daisy chain into others
        if check_invalid_arguments:
            self._check_invalid_arguments()

@@ -151,7 +151,7 @@ class AnsibleModule(object):
        for x in items:
            (k, v) = x.split("=",1)
            params[k] = v
        return (params, args)

    def _log_invocation(self):
        ''' log that ansible ran the module '''

@@ -173,7 +173,7 @@ class AnsibleModule(object):
            return False
        else:
            self.fail_json(msg='Boolean %s not in either boolean list' % arg)

    def jsonify(self, data):
        return json.dumps(data)
@@ -28,12 +28,12 @@ SETUP_CACHE = collections.defaultdict(dict)

class PlayBook(object):
    '''
    runs an ansible playbook, given as a datastructure or YAML filename.
    A playbook is a deployment, config management, or automation based
    set of commands to run in series.

    multiple plays/tasks do not execute simultaneously, but tasks in each
    pattern do execute in parallel (according to the number of forks
    requested) among the hosts they address
    '''

@@ -86,7 +86,7 @@ class PlayBook(object):
            extra_vars = {}
        if only_tags is None:
            only_tags = [ 'all' ]

        self.module_path = module_path
        self.forks = forks
        self.timeout = timeout

@@ -107,7 +107,7 @@ class PlayBook(object):
        self.only_tags = only_tags

        self.inventory = ansible.inventory.Inventory(host_list)

        if not self.inventory._is_script:
            self.global_vars.update(self.inventory.get_group_variables('all'))

@@ -143,7 +143,7 @@ class PlayBook(object):
        return accumulated_plays

    # *****************************************************

    def run(self):
        ''' run all patterns in the playbook '''

@@ -186,11 +186,11 @@ class PlayBook(object):
            pattern=task.play.hosts, inventory=self.inventory, module_name=task.module_name,
            module_args=task.module_args, forks=self.forks,
            remote_pass=self.remote_pass, module_path=self.module_path,
            timeout=self.timeout, remote_user=task.play.remote_user,
            remote_port=task.play.remote_port, module_vars=task.module_vars,
            private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE, basedir=self.basedir,
            conditional=task.only_if, callbacks=self.runner_callbacks,
            verbose=self.verbose, sudo=task.play.sudo, sudo_user=task.play.sudo_user,
            transport=task.play.transport, sudo_pass=self.sudo_pass, is_playbook=True
        )

@@ -226,7 +226,7 @@ class PlayBook(object):
        for host, result in results['contacted'].iteritems():
            facts = result.get('ansible_facts', {})
            self.SETUP_CACHE[host].update(facts)

        # flag which notify handlers need to be run
        if len(task.notify) > 0:
            for host, results in results.get('contacted',{}).iteritems():

@@ -237,7 +237,7 @@ class PlayBook(object):
    # *****************************************************

    def _flag_handler(self, handlers, handler_name, host):
        '''
        if a task has any notify elements, flag handlers for run
        at end of execution cycle for hosts that have indicated
        changes have been made
        '''

@@ -256,8 +256,8 @@ class PlayBook(object):

    def _do_setup_step(self, play):
        ''' get facts from the remote system '''

        host_list = [ h for h in self.inventory.list_hosts(play.hosts)
            if not (h in self.stats.failures or h in self.stats.dark) ]

        if not play.gather_facts:

@@ -271,7 +271,7 @@ class PlayBook(object):
            pattern=play.hosts, module_name='setup', module_args={}, inventory=self.inventory,
            forks=self.forks, module_path=self.module_path, timeout=self.timeout, remote_user=play.remote_user,
            remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
            setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
            verbose=self.verbose, transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True
        ).run()
        self.stats.compute(setup_results, setup=True)

@@ -297,14 +297,14 @@ class PlayBook(object):
        self.callbacks.on_play_start(play.name)

        # get facts from system
        rc = self._do_setup_step(play)

        # now with that data, handle contentional variable file imports!
        if play.vars_files and len(play.vars_files) > 0:
            play.update_vars_files(self.inventory.list_hosts(play.hosts))

        for task in play.tasks():

            # only run the task if the requested tags match
            should_run = False
            for x in self.only_tags:
@@ -25,10 +25,10 @@ import os

class Play(object):

    __slots__ = [
        'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
        'handlers', 'remote_user', 'remote_port',
        'sudo', 'sudo_user', 'transport', 'playbook',
        'tags', 'gather_facts', '_ds', '_handlers', '_tasks'
    ]

@@ -60,7 +60,7 @@ class Play(object):

        self._ds = ds
        self.playbook = playbook
        self.hosts = hosts
        self.name = ds.get('name', self.hosts)
        self.vars = ds.get('vars', {})
        self.vars_files = ds.get('vars_files', [])

@@ -126,7 +126,7 @@ class Play(object):

    def tasks(self):
        ''' return task objects for this play '''
        return self._tasks

    def handlers(self):
        ''' return handler objects for this play '''

@@ -146,7 +146,7 @@ class Play(object):
            raise errors.AnsibleError("'vars' section must contain only key/value pairs")

        vars = self.playbook.global_vars

        # translate a list of vars into a dict
        if type(self.vars) == list:
            for item in self.vars:

@@ -178,7 +178,7 @@ class Play(object):

    def update_vars_files(self, hosts):
        ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''

        # now loop through all the hosts...
        for h in hosts:
            self._update_vars_files_for_host(h)

@@ -196,11 +196,11 @@ class Play(object):
            return True

        if tags_counted > 0:
            return False

        # didn't tag the play, and the play contains no steps
        # so assume we just want to gather facts
        return True

    # *************************************************

@@ -213,7 +213,7 @@ class Play(object):

        if type(self.vars_files) != list:
            self.vars_files = [ self.vars_files ]

        if (host is not None):
            inventory = self.playbook.inventory
            hostrec = inventory.get_host(host)

@@ -288,8 +288,8 @@ class Play(object):
                raise errors.AnsibleError("%s must be stored as dictonary/hash: %s" % filename4)
            if host is not None and self._has_vars_in(filename2) and not self._has_vars_in(filename3):
                # running a host specific pass and has host specific variables
                # load into setup cache
                self.playbook.SETUP_CACHE[host].update(new_vars)
            elif host is None:
                # running a non-host specific pass and we can update the global vars instead
                self.vars.update(new_vars)
@@ -20,9 +20,9 @@ from ansible import utils

class Task(object):

    __slots__ = [
        'name', 'action', 'only_if', 'async_seconds', 'async_poll_interval',
        'notify', 'module_name', 'module_args', 'module_vars',
        'play', 'notified_by', 'tags', 'with_items', 'first_available_file', 'ignore_errors'
    ]

@@ -63,7 +63,7 @@ class Task(object):
        self.first_available_file = ds.get('first_available_file', None)
        self.with_items = ds.get('with_items', None)
        self.ignore_errors = ds.get('ignore_errors', False)

        # notify can be a string or a list, store as a list
        if isinstance(self.notify, basestring):
            self.notify = [ self.notify ]

@@ -92,8 +92,8 @@ class Task(object):
        # make first_available_file accessable to Runner code
        if self.first_available_file:
            self.module_vars['first_available_file'] = self.first_available_file

        # process with_items so it can be used by Runner code
        if self.with_items is None:
            self.with_items = [ ]
        self.module_vars['items'] = self.with_items

@@ -109,4 +109,4 @@ class Task(object):
        elif type(apply_tags) == list:
            self.tags.extend(apply_tags)
        self.tags.extend(import_tags)
@@ -30,7 +30,7 @@ import codecs
import collections
import re

import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible import errors

@@ -38,7 +38,7 @@ from ansible import module_common
import poller
import connection
from ansible import callbacks as ans_callbacks

HAS_ATFORK=True
try:
    from Crypto.Random import atfork

@@ -65,7 +65,7 @@ def _executor_hook(job_queue, result_queue):
            pass
        except:
            traceback.print_exc()

################################################

class ReturnData(object):

@@ -103,7 +103,7 @@ class Runner(object):

    # see bin/ansible for how this is used...

    def __init__(self,
        host_list=C.DEFAULT_HOST_LIST,      # ex: /etc/ansible/hosts, legacy usage
        module_path=C.DEFAULT_MODULE_PATH,  # ex: /usr/share/ansible
        module_name=C.DEFAULT_MODULE_NAME,  # ex: copy

@@ -114,7 +114,7 @@ class Runner(object):
        remote_user=C.DEFAULT_REMOTE_USER,  # ex: 'username'
        remote_pass=C.DEFAULT_REMOTE_PASS,  # ex: 'password123' or None if using key
        remote_port=C.DEFAULT_REMOTE_PORT,  # if SSH on different ports
        private_key_file=C.DEFAULT_PRIVATE_KEY_FILE,  # if not using keys/passwords
        sudo_pass=C.DEFAULT_SUDO_PASS,      # ex: 'password123' or None
        background=0,                       # async poll every X seconds, else 0 for non-async
        basedir=None,                       # directory of playbook, if applicable

@@ -125,7 +125,7 @@ class Runner(object):
        verbose=False,                      # whether to show more or less
        sudo=False,                         # whether to run sudo or not
        sudo_user=C.DEFAULT_SUDO_USER,      # ex: 'root'
        module_vars=None,                   # a playbooks internals thing
        is_playbook=False,                  # running from playbook or not?
        inventory=None                      # reference to Inventory object
        ):

@@ -162,7 +162,7 @@ class Runner(object):
            raise errors.AnsibleError("SSH transport does not support passwords, only keys or agents")
        if self.transport == 'local':
            self.remote_user = pwd.getpwuid(os.geteuid())[0]

        # ensure we are using unique tmp paths
        random.seed()

@@ -201,7 +201,7 @@ class Runner(object):

    # *****************************************************

    def _execute_module(self, conn, tmp, module_name, args,
        async_jid=None, async_module=None, async_limit=None, inject=None):

        ''' runs a module that has already been transferred '''

@@ -270,7 +270,7 @@ class Runner(object):

        return self._execute_module(conn, tmp, 'async_wrapper', module_args,
            async_module=module_path,
            async_jid=self.generated_jid,
            async_limit=self.background,
            inject=inject
        )

@@ -301,7 +301,7 @@ class Runner(object):
        if not found:
            results=dict(failed=True, msg="could not find src in first_available_file list")
            return ReturnData(host=conn.host, results=results)

        source = utils.template(source, inject)
        source = utils.path_dwim(self.basedir, source)

@@ -309,10 +309,10 @@ class Runner(object):
        if local_md5 is None:
            result=dict(failed=True, msg="could not find src=%s" % source)
            return ReturnData(host=conn.host, result=result)

        remote_md5 = self._remote_md5(conn, tmp, dest)

        exec_rc = None
        if local_md5 != remote_md5:
            # transfer the file to a remote tmp location
            tmp_src = tmp + source.split('/')[-1]

@@ -344,7 +344,7 @@ class Runner(object):
        source = utils.template(source, inject)
        # apply templating to dest argument
        dest = utils.template(dest, inject)

        # files are saved in dest dir, with a subdir for each host, then the filename
        dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), conn.host, source)
        dest = dest.replace("//","/")

@@ -383,7 +383,7 @@ class Runner(object):
        else:
            result = dict(changed=False, md5sum=local_md5, file=source)
            return ReturnData(host=conn.host, result=result)

    # *****************************************************

    def _execute_template(self, conn, tmp, inject=None):

@@ -423,7 +423,7 @@ class Runner(object):
            result = dict(failed=True, msg=str(e))
            return ReturnData(host=conn.host, comm_ok=False, result=result)
        xfered = self._transfer_str(conn, tmp, 'source', resultant)

        # run the copy module, queue the file module
        self.module_args = "%s src=%s dest=%s" % (self.module_args, xfered, dest)
        return self._execute_module(conn, tmp, 'copy', self.module_args, inject=inject).daisychain('file')

@@ -435,7 +435,7 @@ class Runner(object):

        # FIXME: once assemble is ported over to the use the new common logic, this method
        # will be unneccessary as it can decide to daisychain via it's own module returns.
        # and this function can be deleted.

        return self._execute_module(conn, tmp, 'assemble', self.module_args, inject=inject).daisychain('file')

@@ -489,7 +489,7 @@ class Runner(object):
            # hack for apt and soon yum, with_items maps back into a single module call
            inject['item'] = ",".join(items)
            items = []

        if len(items) == 0:
            return self._executor_internal_inner(host, inject, port)
        else:

@@ -589,7 +589,7 @@ class Runner(object):
                    changed = True
                result.result.update(result2.result)
                result.result['changed'] = changed

            del result.result['daisychain']

        self._delete_remote_files(conn, tmp)

@@ -631,7 +631,7 @@ class Runner(object):
            out = "\n".join(stdout.readlines())
        else:
            out = stdout

        if type(stderr) != str:
            err = "\n".join(stderr.readlines())
        else:

@@ -642,15 +642,15 @@ class Runner(object):
    # *****************************************************

    def _remote_md5(self, conn, tmp, path):
        ''' takes a remote md5sum without requiring python, and returns 0 if no file '''

        test = "rc=0; [[ -r \"%s\" ]] || rc=2; [[ -f \"%s\" ]] || rc=1" % (path,path)
        md5s = [
            "(/usr/bin/md5sum %s 2>/dev/null)" % path,
            "(/sbin/md5sum -q %s 2>/dev/null)" % path,
            "(/usr/bin/digest -a md5 -v %s 2>/dev/null)" % path
        ]

        cmd = " || ".join(md5s)
        cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
        return self._low_level_exec_command(conn, cmd, tmp, sudoable=False).split()[0]

@@ -702,7 +702,7 @@ class Runner(object):
        module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
        encoded_args = "\"\"\"%s\"\"\"" % utils.template(self.module_args, inject).replace("\"","\\\"")
        module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)

        # use the correct python interpreter for the host
        if 'ansible_python_interpreter' in inject:
            interpreter = inject['ansible_python_interpreter']

@@ -771,13 +771,13 @@ class Runner(object):

    def run(self):
        ''' xfer & run module on all matched hosts '''

        # find hosts that match the pattern
        hosts = self.inventory.list_hosts(self.pattern)
        if len(hosts) == 0:
            self.callbacks.on_no_hosts()
            return dict(contacted={}, dark={})

        hosts = [ (self,x) for x in hosts ]
        results = None
        if self.forks > 1:
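The _remote_md5 helper above avoids requiring Python on the target by chaining several candidate md5 commands with "||" and echoing a sentinel return code when the file is missing or unreadable. A standalone sketch of how that one-liner is assembled (binary paths taken from the hunk above; the sketch only builds the string, it does not run anything remotely):

# Illustrative only: assemble the fallback md5 shell command the way the hunk above does.
def build_remote_md5_command(path):
    test = 'rc=0; [[ -r "%s" ]] || rc=2; [[ -f "%s" ]] || rc=1' % (path, path)
    md5s = [
        "(/usr/bin/md5sum %s 2>/dev/null)" % path,
        "(/sbin/md5sum -q %s 2>/dev/null)" % path,
        "(/usr/bin/digest -a md5 -v %s 2>/dev/null)" % path,
    ]
    cmd = " || ".join(md5s)   # first md5 tool that exists on the target wins
    # if every candidate fails, echo "<rc> <path>" so the caller still gets a first field to split on
    return '%s; %s || (echo "${rc} %s")' % (test, cmd, path)

print(build_remote_md5_command("/etc/motd"))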
@@ -80,17 +80,17 @@ class ParamikoConnection(object):

        bufsize = 4096
        chan = self.ssh.get_transport().open_session()
        chan.get_pty()

        if not self.runner.sudo or not sudoable:
            quoted_command = '"$SHELL" -c ' + pipes.quote(cmd)
            chan.exec_command(quoted_command)
        else:
            # Rather than detect if sudo wants a password this time, -k makes
            # sudo always ask for a password if one is required. The "--"
            # tells sudo that this is the end of sudo options and the command
            # follows. Passing a quoted compound command to sudo (or sudo -s)
            # directly doesn't work, so we shellquote it with pipes.quote()
            # and pass the quoted string to the user's shell. We loop reading
            # output until we see the randomly-generated sudo prompt set with
            # the -p option.
@@ -58,11 +58,11 @@ class SSHConnection(object):

        ssh_cmd = ["ssh", "-tt", "-q"] + self.common_args + [self.host]
        if self.runner.sudo and sudoable:
            # Rather than detect if sudo wants a password this time, -k makes
            # sudo always ask for a password if one is required. The "--"
            # tells sudo that this is the end of sudo options and the command
            # follows. Passing a quoted compound command to sudo (or sudo -s)
            # directly doesn't work, so we shellquote it with pipes.quote()
            # and pass the quoted string to the user's shell. We loop reading
            # output until we see the randomly-generated sudo prompt set with
            # the -p option.

@@ -104,12 +104,12 @@ class SSHConnection(object):
            stdout += os.read(p.stdout.fileno(), 1024)
        # older versions of ssh generate this error which we ignore
        stdout=stdout.replace("tcgetattr: Invalid argument\n", "")
        # suppress Ubuntu 10.04/12.04 error on -tt option
        stdout=stdout.replace("tcgetattr: Inappropriate ioctl for device\n","")

        if p.returncode != 0 and stdout.find('Bad configuration option: ControlPersist') != -1:
            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" before running again')

        return ('', stdout, '')

    def put_file(self, in_path, out_path):
@@ -36,7 +36,7 @@ except ImportError:

try:
    from hashlib import md5 as _md5
except ImportError:
    from md5 import md5 as _md5

###############################################################

@@ -117,10 +117,10 @@ def parse_json(data):
        results = {}
        try:
            tokens = shlex.split(data)
        except:
            print "failed to parse json: "+ data
            raise

        for t in tokens:
            if t.find("=") == -1:
                raise errors.AnsibleError("failed to parse: %s" % data)

@@ -131,7 +131,7 @@ def parse_json(data):
            elif value.lower() in [ 'false', '0' ]:
                value = False
            if key == 'rc':
                value = int(value)
            results[key] = value
        if len(results.keys()) == 0:
            return { "failed" : True, "parsed" : False, "msg" : data }

@@ -204,7 +204,7 @@ def template(text, vars):
        if (depth > 20):
            raise errors.AnsibleError("template recursion depth exceeded")
        prev_text = text
        text = varReplace(unicode(text), vars)
    return text

def template_from_file(basedir, path, vars):

@@ -223,7 +223,7 @@ def parse_yaml(data):
    ''' convert a yaml string to a data structure '''
    return yaml.load(data)

def parse_yaml_from_file(path):
    ''' convert a yaml file to a data structure '''

@@ -268,7 +268,7 @@ def default(value, function):
    return value

####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line

class SortedOptParser(optparse.OptionParser):

@@ -287,7 +287,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')

@@ -299,7 +299,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=constants.DEFAULT_MODULE_PATH)
    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)

    if output_opts:

@@ -314,9 +314,9 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn
        parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
            default=None)   # Can't default to root because we need to detect when this option was given
        parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
            dest='remote_user',
            help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)

    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            choices=C.DEFAULT_TRANSPORT_OPTS,

@@ -325,7 +325,7 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, asyn

    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')
@@ -77,7 +77,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, install_reco
        installed, upgradable = package_status(m, name, version, cache)
        if not installed or (upgrade and upgradable):
            packages += "'%s' " % package

    if len(packages) != 0:
        if force:
            force_yes = '--force-yes'

@@ -105,7 +105,7 @@ def remove(m, pkgspec, cache, purge=False):
        installed, upgradable = package_status(m, name, version, cache)
        if installed:
            packages += "'%s' " % package

    if len(packages) == 0:
        m.exit_json(changed=False)
    else:

@@ -157,9 +157,9 @@ def main():
        module.exit_json(changed=False)

    force_yes = module.boolean(p['force'])

    packages = p['package'].split(',')
    latest = p['state'] == 'latest'
    for package in packages:
        if package.count('=') > 1:
            module.fail_json(msg="invalid package spec: %s" % package)
@@ -45,39 +45,39 @@ def write_temp_file(data):
# main

def main():

    module = AnsibleModule(
        argument_spec = dict(
            src = dict(required=True),
            dest = dict(required=True),
        )
    )

    changed=False
    pathmd5 = None
    destmd5 = None
    src = os.path.expanduser(module.params['src'])
    dest = os.path.expanduser(module.params['dest'])

    if not os.path.exists(src):
        module.fail_json(msg="Source (%s) does not exist" % src)

    if not os.path.isdir(src):
        module.fail_json(msg="Source (%s) is not a directory" % src)

    path = write_temp_file(assemble_from_fragments(src))
    pathmd5 = module.md5(path)

    if os.path.exists(dest):
        destmd5 = module.md5(dest)

    if pathmd5 != destmd5:
        shutil.copy(path, dest)
        changed = True

    # Mission complete
    module.exit_json(src=src, dest=dest, md5sum=destmd5,
        changed=changed, msg="OK",
        daisychain="file", daisychain_args=module.params)
@@ -55,7 +55,7 @@ def main():
        # file not written yet?  That means it is running
        module.exit_json(results_file=log_path, ansible_job_id=jid, started=1)
    else:
        module_fail_json(ansible_job_id=jid, results_file=log_path,
            msg="Could not parse job output: %s" % data)

    if not data.has_key("started"):
@@ -63,7 +63,7 @@ def daemonize_self():
    dev_null = file('/dev/null','rw')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())

if len(sys.argv) < 3:
    print json.dumps({

@@ -101,11 +101,11 @@ def _run_command(wrapped_cmd, jid, log_path):
    logfile.close()
    logfile = open(log_path, "w")
    result = {}

    outdata = ''
    try:
        cmd = shlex.split(wrapped_cmd)
        script = subprocess.Popen(cmd, shell=False,
            stdin=None, stdout=logfile, stderr=logfile)
        script.communicate()
        outdata = file(log_path).read()

@@ -125,7 +125,7 @@ def _run_command(wrapped_cmd, jid, log_path):
            "cmd"  : wrapped_cmd,
            "data" : outdata,   # temporary debug only
            "msg"  : traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        logfile.write(json.dumps(result))
        logfile.close()
@@ -38,7 +38,7 @@ import os.path

def keyfile(user, write=False):
    """
    Calculate name of authorized keys file, optionally creating the
    directories and file, properly setting permissions.

    :param str user: name of user in passwd file

@@ -51,13 +51,13 @@ def keyfile(user, write=False):
    sshdir = os.path.join(homedir, ".ssh")
    keysfile = os.path.join(sshdir, "authorized_keys")

    if not write:
        return keysfile

    uid = user_entry.pw_uid
    gid = user_entry.pw_gid

    if not os.path.exists(sshdir):
        os.mkdir(sshdir, 0700)
        os.chown(sshdir, uid, gid)
        os.chmod(sshdir, 0700)

@@ -74,7 +74,7 @@ def keyfile(user, write=False):

def readkeys(filename):

    if not os.path.isfile(filename):
        return []
    f = open(filename)
    keys = [line.rstrip() for line in f.readlines()]

@@ -97,19 +97,19 @@ def enforce_state(module, params):
    state = params.get("state", "present")

    # check current state -- just get the filename, don't create file
    params["keyfile"] = keyfile(user, write=False)
    keys = readkeys(params["keyfile"])
    present = key in keys

    # handle idempotent state=present
    if state=="present":
        if present:
            module.exit_json(changed=False)
        keys.append(key)
        writekeys(keyfile(user,write=True), keys)

    elif state=="absent":
        if not present:
            module.exit_json(changed=False)
        keys.remove(key)
        writekeys(keyfile(user,write=True), keys)

@@ -133,4 +133,4 @@ def main():

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
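The enforce_state hunk above follows the read-compare-exit-or-mutate pattern that keeps the module idempotent: if the key is already present (or already absent), report changed=False and stop; otherwise modify the key list and rewrite the file. A minimal standalone sketch of the same pattern — the file path and helper are illustrative, not the module's own API:

import os

# Minimal sketch of the present/absent idempotency pattern used above; illustrative only.
def ensure_line(path, line, state="present"):
    lines = []
    if os.path.isfile(path):
        with open(path) as f:
            lines = [l.rstrip("\n") for l in f]

    present = line in lines
    if state == "present":
        if present:
            return False            # already in the desired state -> no change
        lines.append(line)
    elif state == "absent":
        if not present:
            return False
        lines.remove(line)

    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")
    return True                     # a change was made

changed = ensure_line("/tmp/authorized_keys.demo", "ssh-rsa AAAA... comment")
print(changed)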
@@ -30,7 +30,7 @@ def main():
            src=dict(required=True),
            dest=dict(required=True)
        )
    )

    src = os.path.expanduser(module.params['src'])
    dest = os.path.expanduser(module.params['dest'])

@@ -60,9 +60,9 @@ def main():
        try:
            shutil.copyfile(src, dest)
        except shutil.Error:
            module.fail_json(msg="failed to copy: %s and %s are the same" % (src, dest))
        except IOError:
            module.fail_json(msg="failed to copy: %s to %s" % (src, dest))
        changed = True
    else:
        changed = False
library/file | 18
@@ -47,7 +47,7 @@ def add_path_info(kwargs):
        kwargs['secontext'] = ':'.join(selinux_context(path))
    else:
        kwargs['state'] = 'absent'
    return kwargs

def module_exit_json(**kwargs):
    add_path_info(kwargs)

@@ -155,7 +155,7 @@ def set_context_if_different(path, context, changed):
            module_fail_json(path=path, msg='set selinux context failed')
        changed = True
    return changed

def set_owner_if_different(path, owner, changed):
    if owner is None:
        return changed

@@ -167,7 +167,7 @@ def set_owner_if_different(path, owner, changed):
        return True

    return changed

def set_group_if_different(path, group, changed):
    if group is None:
        return changed

@@ -186,8 +186,8 @@ def set_mode_if_different(path, mode, changed):
        # FIXME: support English modes
        mode = int(mode, 8)
    except Exception, e:
        module_fail_json(path=path, msg='mode needs to be something octalish', details=str(e))

    st = os.stat(path)
    prev_mode = stat.S_IMODE(st[stat.ST_MODE])

@@ -290,7 +290,7 @@ def main():
        module_exit_json(path=path, changed=False)

    if state == 'file':

        if prev_state == 'absent':
            module_fail_json(path=path, msg='file does not exist, use copy or template module to create')

@@ -307,7 +307,7 @@ def main():
        if prev_state == 'absent':
            os.makedirs(path)
            changed = True

        # set modes owners and context as needed
        changed = set_context_if_different(path, secontext, changed)
        changed = set_owner_if_different(path, owner, changed)

@@ -317,14 +317,14 @@ def main():
        module_exit_json(path=path, changed=changed)

    elif state == 'link':

        if os.path.isabs(src):
            abs_src = src
        else:
            abs_src = os.path.join(os.path.dirname(dest), src)
        if not os.path.exists(abs_src):
            module_fail_json(path=path, src=src, msg='src file does not exist')

        if prev_state == 'absent':
            os.symlink(src, path)
            changed = True
@@ -27,14 +27,14 @@ import tempfile
HAS_URLLIB2=True
try:
    import urllib2
except ImportError:
    HAS_URLLIB2=False
HAS_URLPARSE=True

try:
    import urlparse
    import socket
except ImportError:
    HAS_URLPARSE=False

# ==============================================================

@@ -92,7 +92,7 @@ def url_do_get(module, url, dest):

def url_get(module, url, dest):
    """
    Download url and store at dest.
    If dest is a directory, determine filename from url.
    Return (tempfile, info about the request)
    """

@@ -123,8 +123,8 @@ def url_get(module, url, dest):
# main

def main():

    # does this really happen on non-ancient python?
    if not HAS_URLLIB2:
        module.fail_json(msg="urllib2 is not installed")
    if not HAS_URLPARSE:

@@ -138,16 +138,16 @@ def main():
            dest = dict(required=True),
        )
    )

    url = module.params['url']
    dest = os.path.expanduser(module.params['dest'])

    # download to tmpsrc
    tmpsrc, info = url_get(module, url, dest)
    md5sum_src = None
    md5sum_dest = None
    dest = info['actualdest']

    # raise an error if there is no tmpsrc file
    if not os.path.exists(tmpsrc):
        os.remove(tmpsrc)

@@ -156,7 +156,7 @@ def main():
        os.remove(tmpsrc)
        module.fail_json( msg="Source %s not readable" % (tmpsrc))
    md5sum_src = module.md5(tmpsrc)

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest

@@ -171,22 +171,22 @@ def main():
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest)))

    if md5sum_src != md5sum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception, err:
            os.remove(tmpsrc)
            module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
        changed = True
    else:
        changed = False

    os.remove(tmpsrc)

    # Mission complete
    module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src,
        changed=changed, msg=info.get('msg',''),
        daisychain="file", daisychain_args=info.get('daisychain_args',''))

# this is magic, see lib/ansible/module_common.py
|
||||
|
|
|
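The get_url flow in the hunks above is a common idempotence pattern: download to a temporary file, hash both the temp file and any existing destination, and only copy over the destination when the hashes differ. A rough standalone sketch of that pattern, using hashlib and shutil directly rather than the module's own md5/fail_json helpers (Python 3 here; the module code shown is Python 2):

# Sketch of the download-compare-replace pattern; not the module's actual code.
import hashlib
import os
import shutil
import tempfile
import urllib.request  # urllib2 in the Python 2 code above

def file_md5(path):
    # Hash the file in chunks so large downloads are not loaded fully into memory.
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()

def fetch_if_changed(url, dest):
    fd, tmpsrc = tempfile.mkstemp()
    os.close(fd)
    urllib.request.urlretrieve(url, tmpsrc)
    changed = not os.path.exists(dest) or file_md5(tmpsrc) != file_md5(dest)
    if changed:
        shutil.copyfile(tmpsrc, dest)
    os.remove(tmpsrc)
    return changed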
@@ -166,7 +166,7 @@ def main():
        if rc != 0:
            module.fail_json(msg=err)
    else:
        # else do a pull
        # else do a pull
        before = get_version(dest)
        (rc, out, err) = reset(dest)
        if rc != 0:

@@ -80,7 +80,7 @@ def group_info(group):
    except KeyError:
        return False
    return info

# ===========================================

def main():

@@ -23,8 +23,8 @@
# mount module - mount fs and define in fstab
# usage:
#
# mount name=mountpoint, src=device_to_be_mounted fstype=fstype
# opts=mount_opts, dump=0 passno=0 state=[present|absent|mounted|unmounted]
# mount name=mountpoint, src=device_to_be_mounted fstype=fstype
# opts=mount_opts, dump=0 passno=0 state=[present|absent|mounted|unmounted]
#
# absent == remove from fstab and unmounted
# present == add to fstab, do not change mount state

@@ -52,7 +52,7 @@ def set_mount(**kwargs):
    )
    args.update(kwargs)

    new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
    new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'

    to_write = []
    exists = False
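set_mount builds each fstab entry by interpolating a dict of mount arguments into the new_line template shown above. A quick illustration of what that produces (the values are made-up examples, not anything from the commit):

# Illustration of the fstab line template from set_mount; example values only.
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
args = {
    'src': '/dev/sdb1',       # device to be mounted (example)
    'name': '/srv/data',      # mount point (example)
    'fstype': 'ext4',
    'opts': 'defaults',
    'dump': '0',
    'passno': '0',
}
print(new_line % args, end='')
# -> /dev/sdb1 /srv/data ext4 defaults 0 0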
@@ -69,7 +69,7 @@ def set_mount(**kwargs):
            # but it is not our fault so leave it be
            to_write.append(line)
            continue

        ld = {}
        ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()

@@ -88,16 +88,16 @@ def set_mount(**kwargs):
            to_write.append(new_line % ld)
        else:
            to_write.append(line)

    if not exists:
        to_write.append(new_line % args)
        changed = True

    if changed:
        write_fstab(to_write, args['fstab'])

    return (args['name'], changed)


def unset_mount(**kwargs):
    """ remove a mount point from fstab """

@@ -125,7 +125,7 @@ def unset_mount(**kwargs):
            # but it is not our fault so leave it be
            to_write.append(line)
            continue

        ld = {}
        ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()

@@ -141,7 +141,7 @@ def unset_mount(**kwargs):

    return (args['name'], changed)


def mount(**kwargs):
    """ mount up a path or remount if needed """

@@ -185,7 +185,7 @@ def main():
            fstab = dict(default=None)
        )
    )

    changed = False
    rc = 0
    args = {

@@ -201,12 +201,12 @@ def main():
        args['dump'] = module.params['dump']
    if module.params['fstab'] is not None:
        args['fstab'] = module.params['fstab']

    # absent == remove from fstab and unmounted
    # unmounted == do not change fstab state, but unmount
    # present == add to fstab, do not change mount state
    # mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it

    state = module.params['state']
    name = module.params['name']
    if state == 'absent':

@@ -216,24 +216,24 @@ def main():
            res,msg = umount(**args)
            if res:
                fail_json(msg="Error unmounting %s: %s" % (name, msg))

        if os.path.exists(name):
            try:
                os.rmdir(name)
            except (OSError, IOError), e:
                fail_json(msg="Error rmdir %s: %s" % (name, str(e)))

        module.exit_json(changed=changed, **args)

    if state == 'unmounted':
        if os.path.ismount(name):
            res,msg = umount(**args)
            if res:
                fail_json(msg="Error unmounting %s: %s" % (name, msg))
            changed = True

        module.exit_json(changed=changed, **args)

    if state in ['mounted', 'present']:
        name, changed = set_mount(**args)
        if state == 'mounted':

@@ -242,7 +242,7 @@ def main():
                    os.makedirs(name)
                except (OSError, IOError), e:
                    fail_json(msg="Error making dir %s: %s" % (name, str(e)))

            res = 0
            if os.path.ismount(name):
                if changed:

@@ -250,16 +250,16 @@ def main():
            else:
                changed = True
                res,msg = mount(**args)

            if res:
                fail_json(msg="Error mounting %s: %s" % (name, msg))


        module.exit_json(changed=changed, **args)

    module.fail_json(msg='Unexpected position reached')
    sys.exit(0)

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()

@@ -101,14 +101,14 @@ def _get_service_status(name):
        # iptables status command output is lame
        # TODO: lookup if we can use a return code for this instead?
        running = True

    return running

def _run(cmd):
    # returns (rc, stdout, stderr) from shell command
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    stdout, stderr = process.communicate()
    return (process.returncode, stdout, stderr)
    return (process.returncode, stdout, stderr)

def _do_enable(name, enable):

@@ -126,9 +126,9 @@ def _do_enable(name, enable):
        rc, stdout, stderr = _run("%s %s %s" % (CHKCONFIG, name, valid_argument['on']))
    else:
        rc, stdout, stderr = _run("%s %s %s" % (CHKCONFIG, name, valid_argument['off']))

    return rc, stdout, stderr

def main():
    module = AnsibleModule(
        argument_spec = dict(

@@ -145,7 +145,7 @@ def main():
    # ===========================================
    # find binaries locations on minion
    _find_binaries(module)

    # ===========================================
    # get service status
    running = _get_service_status(name)

@@ -156,7 +156,7 @@ def main():
    rc = 0
    err = ''
    out = ''

    if module.params['enabled']:
        rc_enable, out_enable, err_enable = _do_enable(name, enable)
        rc += rc_enable

@@ -208,10 +208,10 @@ def main():
        result['state'] = state
        rc, stdout, stderr = _run("%s %s status" % (SERVICE, name))
    module.exit_json(**result);

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()

@@ -360,7 +360,7 @@ class FreeBSDHardware(Hardware):
        # Get swapinfo. swapinfo output looks like:
        # Device 1M-blocks Used Avail Capacity
        # /dev/ada0p3 314368 0 314368 0%
        #
        #
        cmd = subprocess.Popen("/usr/sbin/swapinfo -m", shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = cmd.communicate()
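The comment above shows the swapinfo output the FreeBSD fact code has to turn into swap facts. As a rough illustration of that parsing step (made-up sample text, not the module's actual implementation):

# Parse the header-plus-device-lines format shown in the comment above.
# The sample text is illustrative; the real code reads it from "swapinfo -m".
sample = """Device          1M-blocks     Used    Avail Capacity
/dev/ada0p3        314368        0   314368     0%
"""

swap = []
for line in sample.splitlines()[1:]:          # skip the header row
    fields = line.split()
    if len(fields) >= 4:
        swap.append({
            'device': fields[0],
            'total_mb': int(fields[1]),
            'used_mb': int(fields[2]),
            'free_mb': int(fields[3]),
        })
print(swap)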
@@ -601,7 +601,7 @@ def run_setup(module):

    setup_options = {}
    facts = ansible_facts()

    for (k, v) in facts.items():
        setup_options["ansible_%s" % k] = v

@@ -1,3 +1,3 @@
# There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# There is actually no actual shell module source, when you use 'shell' in ansible,
# it runs the 'command' module with special arguments and it behaves differently.
# See the command source and the comment "#USE_SHELL".

42
library/virt

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""

@@ -24,9 +24,9 @@ try:
except ImportError:
    print "failed=True msg='libvirt python module unavailable'"
    sys.exit(1)

ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
               'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)

@@ -141,11 +141,11 @@ class LibvirtConnection(object):
    def get_autostart(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.autostart()

    def set_autostart(self, vmid, val):
        vm = self.conn.lookupByName(vmid)
        return vm.setAutostart(val)


class Virt(object):

@@ -157,7 +157,7 @@ class Virt(object):
    def get_vm(self, vmid):
        self.__get_conn()
        return self.conn.find_vm(vmid)

    def state(self):
        vms = self.list_vms()
        state = []

@@ -216,7 +216,7 @@ class Virt(object):

    def virttype(self):
        return self.__get_conn().get_type()

    def autostart(self, vmid):
        self.conn = self.__get_conn()
        return self.conn.set_autostart(vmid, True)

@@ -313,26 +313,26 @@ def core(module):

    v = Virt()
    res = {}

    if state:
        if not guest:
            module.fail_json(msg = "state change requires a guest specified")

        res['changed'] = False
        if state == 'running':
            if v.status(guest) is not 'running':
                res['changed'] = True
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) is not 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        else:
            module.fail_json(msg="unexpected state")
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in VM_COMMANDS:
            if not guest:

@@ -341,20 +341,20 @@ def core(module):
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            res = getattr(v, command)()
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % basecmd)

    module.fail_json(msg="expected state or command parameter to be specified")
    module.fail_json(msg="expected state or command parameter to be specified")

def main():

    module = AnsibleModule(argument_spec=dict(
        name = dict(aliases=['guest']),
        state = dict(choices=['running', 'shutdown']),
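core() above dispatches a requested command either to a per-VM method or, via hasattr/getattr, to a host-level method on the Virt wrapper. A small standalone sketch of that dispatch pattern (toy class, not the module's Virt object):

# Toy illustration of the hasattr/getattr command dispatch used in core().
class Host(object):
    def freemem(self):
        return 2048
    def nodeinfo(self):
        return {'cpus': 4}

def run_command(obj, command):
    if hasattr(obj, command):
        res = getattr(obj, command)()   # call the method named by the string
        if type(res) != dict:
            res = {command: res}        # normalize scalars into a dict result
        return res
    raise ValueError("Command %s not recognized" % command)

print(run_command(Host(), 'freemem'))   # -> {'freemem': 2048}
print(run_command(Host(), 'nodeinfo'))  # -> {'cpus': 4}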
@@ -375,4 +375,4 @@ def main():

# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
main()

78
library/yum

@@ -33,7 +33,7 @@ def is_installed(repoq, pkgspec, qf=def_qf):
    rc,out,err = run(cmd)
    if rc == 0:
        return [ p for p in out.split('\n') if p.strip() ]

    return []

def is_available(repoq, pkgspec, qf=def_qf):

@@ -50,7 +50,7 @@ def is_update(repoq, pkgspec, qf=def_qf):
    rc,out,err = run(cmd)
    if rc == 0:
        return set([ p for p in out.split('\n') if p.strip() ])

    return []

@@ -60,26 +60,26 @@ def what_provides(repoq, req_spec, qf=def_qf):
    ret = []
    if rc == 0:
        ret = set([ p for p in out.split('\n') if p.strip() ])

    return ret

def local_nvra(path):
    """return nvra of a local rpm passed in"""

    cmd = "/bin/rpm -qp --qf='%%{name}-%%{version}-%%{release}.%%{arch}\n' %s'" % path
    rc, out, err = run(cmd)
    if rc != 0:
        return None
    nvra = out.split('\n')[0]
    return nvra


def pkg_to_dict(pkgstr):
    if pkgstr.strip():
        n,e,v,r,a,repo = pkgstr.split('|')
    else:
        return {'error_parsing': pkgstr}

    d = {
        'name':n,
        'arch':a,

@@ -89,7 +89,7 @@ def pkg_to_dict(pkgstr):
        'repo':repo,
        'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a)
    }

    if repo == 'installed':
        d['yumstate'] = 'installed'
    else:

@@ -103,16 +103,16 @@ def repolist(repoq, qf="%{repoid}"):
    ret = []
    if rc == 0:
        ret = set([ p for p in out.split('\n') if p.strip() ])

    return ret

def list_stuff(conf_file, stuff):
    qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
    repoq = '%s --plugins --quiet -q ' % repoquery
    if conf_file and os.path.exists(conf_file):
        repoq = '%s -c %s --plugins --quiet -q ' % (repoquery,conf_file)

    if stuff == 'installed':
        return [ pkg_to_dict(p) for p in is_installed(repoq, '-a', qf=qf) if p.strip() ]
    elif stuff == 'updates':
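list_stuff asks repoquery for pipe-delimited fields (the qf format string above) and pkg_to_dict splits each line back into a dict. A rough illustration with a made-up output line (real input comes from the repoquery command, not this string):

# Illustrative parsing of one repoquery line in the qf format shown above.
line = "bash|0|4.2.46|34.el7|x86_64|installed"   # sample only

n, e, v, r, a, repo = line.split('|')
pkg = {
    'name': n,
    'arch': a,
    'epoch': e,
    'version': v,
    'release': r,
    'repo': repo,
    'nevra': '%s:%s-%s-%s.%s' % (e, n, v, r, a),
    'yumstate': 'installed' if repo == 'installed' else 'available',
}
print(pkg['nevra'])  # -> 0:bash-4.2.46-34.el7.x86_64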
@@ -126,7 +126,7 @@ def list_stuff(conf_file, stuff):

def run(command):
    try:
        cmd = subprocess.Popen(command, shell=True,
        cmd = subprocess.Popen(command, shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = cmd.communicate()
    except (OSError, IOError), e:

@@ -144,7 +144,7 @@ def run(command):
        err = ''
    else:
        rc = cmd.returncode

    return rc, out, err

@@ -161,8 +161,8 @@ def install(module, items, repoq, yum_basecmd):
        # check if pkgspec is installed (if possible for idempotence)
        # localpkg
        if spec.endswith('.rpm'):
            # get the pkg name-v-r.arch
            nvra = local_nvra(spec)
            # get the pkg name-v-r.arch
            nvra = local_nvra(spec)
            # look for them in the rpmdb
            if is_installed(repoq, nvra):
                # if they are there, skip it

@@ -181,23 +181,23 @@ def install(module, items, repoq, yum_basecmd):
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                res['failed'] = True
                module.exit_json(**res)

            # if any of them are installed
            # then nothing to do

            found = False
            for this in pkglist:
                if is_installed(repoq, this):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))

            if found:
                continue
            # if not - then pass in the spec as what to install
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec

        cmd = "%s install '%s'" % (yum_basecmd, pkg)
        rc, out, err = run(cmd)
        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed

@@ -213,9 +213,9 @@ def install(module, items, repoq, yum_basecmd):
            res['rc'] = 0
            res['results'].append(out)
            res['msg'] += err

    module.exit_json(**res)


def remove(module, items, repoq, yum_basecmd):
    res = {}

@@ -223,7 +223,7 @@ def remove(module, items, repoq, yum_basecmd):
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for spec in items:
        pkg = None

@@ -237,12 +237,12 @@ def remove(module, items, repoq, yum_basecmd):
                res['msg'] += "No Package matching '%s' found available, installed or updated" % spec
                res['failed']=True
                module.exit_json(**res)

            found = False
            for this in pkglist:
                if is_installed(repoq, this):
                    found = True

            if not found:
                res['results'].append('%s is not installed' % spec)
                continue

@@ -250,7 +250,7 @@ def remove(module, items, repoq, yum_basecmd):

        cmd = "%s remove '%s'" % (yum_basecmd, pkg)
        rc, out, err = run(cmd)

        # FIXME if we ran the remove - check to make sure it actually removed :(
        # look for the pkg in the rpmdb - this is notoriously hard for groups :(
        if rc != 0:

@@ -264,7 +264,7 @@ def remove(module, items, repoq, yum_basecmd):
            res['rc'] = 0
            res['results'].append(out)
            res['msg'] += err

    module.exit_json(**res)

def latest(module, items, repoq, yum_basecmd):

@@ -273,7 +273,7 @@ def latest(module, items, repoq, yum_basecmd):
    res['msg'] = ''
    res['changed'] = False
    res['rc'] = 0

    for spec in items:
        pkg = None

@@ -296,11 +296,11 @@ def latest(module, items, repoq, yum_basecmd):
                    found = True
                else:
                    nothing_to_do = True

            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)
                continue

            if not found:
                basecmd = 'install'
            else:

@@ -333,16 +333,16 @@ def latest(module, items, repoq, yum_basecmd):
            res['rc'] = 0
            res['results'].append(out)
            res['msg'] += err

    module.exit_json(**res)


def ensure(module, state, pkgspec, conf_file):
    res = {}
    stdout = ""
    stderr = ""

    # take multiple args comma separated
    items = [pkgspec]
    if pkgspec.find(',') != -1:

@@ -361,9 +361,9 @@ def ensure(module, state, pkgspec, conf_file):
    elif state == 'latest':
        latest(module, items, repoq, yum_basecmd)

    # should be caught by AnsibleModule argument_spec
    # should be caught by AnsibleModule argument_spec
    return dict(changed=False, failed=True, results='', errors='unexpected state')


def remove_only(pkgspec):
    # remove this pkg and only this pkg - fail if it will require more to remove

@@ -380,7 +380,7 @@ def main():
    # list=available
    # list=repos
    # list=pkgspec

    module = AnsibleModule(
        argument_spec = dict(
            pkg=dict(aliases=['name']),

@@ -400,11 +400,11 @@ def main():

    if not os.path.exists(repoquery):
        module.fail_json(msg="%s is required to run this module. Please install the yum-utils package." % repoquery)

    if params['list']:
        results = dict(results=list_stuff(params['conf_file'], params['list']))
        module.exit_json(**results)

    else:
        pkg = params['pkg']
        if 'pkg' is None: