Merge branch 'integration'

Conflicts:
	lib/ansible/playbook.py
	lib/ansible/runner.py
	library/apt
This commit is contained in:
Michael DeHaan 2012-04-23 21:05:06 -04:00
commit c00699d0ef
46 changed files with 1673 additions and 578 deletions

View file

@ -1,5 +1,6 @@
include README.md ansible.spec include README.md packaging/rpm/ansible.spec
include examples/hosts include examples/hosts
include packaging/distutils/setup.py
recursive-include docs * recursive-include docs *
recursive-include library * recursive-include library *
include Makefile include Makefile

View file

@ -1,25 +1,59 @@
#!/usr/bin/make #!/usr/bin/make
########################################################
# Makefile for Ansible
#
# useful targets:
# make sdist ---------------- produce a tarball
# make rpm ----------------- produce RPMs
# make debian --------------- produce a dpkg (FIXME?)
# make docs ----------------- rebuild the manpages (results are checked in)
# make tests ---------------- run the tests
# make pyflakes, make pep8 -- source code checks
########################################################
# variable section
NAME = "ansible" NAME = "ansible"
# Manpages are currently built with asciidoc -- would like to move to markdown
# This doesn't evaluate until it's called. The -D argument is the
# directory of the target file ($@), kinda like `dirname`.
ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $< ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
ASCII2HTMLMAN = a2x -D docs/html/man/ -d manpage -f xhtml ASCII2HTMLMAN = a2x -D docs/html/man/ -d manpage -f xhtml
MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1 MANPAGES := docs/man/man1/ansible.1 docs/man/man1/ansible-playbook.1
SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")
RPMVERSION := $(shell awk '/Version/{print $$2; exit}' < ansible.spec | cut -d "%" -f1)
RPMRELEASE := $(shell awk '/Release/{print $$2; exit}' < ansible.spec | cut -d "%" -f1) # VERSION file provides one place to update the software version
RPMNVR = "$(NAME)-$(RPMVERSION)-$(RPMRELEASE)" VERSION := $(shell cat VERSION)
# RPM build parameters
RPMSPECDIR= packaging/rpm
RPMSPEC = $(RPMSPECDIR)/ansible.spec
RPMVERSION := $(shell awk '/Version/{print $$2; exit}' < $(RPMSPEC) | cut -d "%" -f1)
RPMRELEASE := $(shell awk '/Release/{print $$2; exit}' < $(RPMSPEC) | cut -d "%" -f1)
RPMDIST = $(shell rpm --eval '%dist')
RPMNVR = "$(NAME)-$(RPMVERSION)-$(RPMRELEASE)$(RPMDIST)"
########################################################
all: clean python all: clean python
tests: tests:
PYTHONPATH=./lib nosetests -v PYTHONPATH=./lib nosetests -v
# To force a rebuild of the docs run 'touch VERSION && make docs'
docs: $(MANPAGES) docs: $(MANPAGES)
%.1: %.1.asciidoc # Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
$(ASCII2MAN) # recently than %.1.asciidoc.
%.1.asciidoc: %.1.asciidoc.in
sed "s/%VERSION%/$(VERSION)/" $< > $@
%.5: %.5.asciidoc # Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
%.1: %.1.asciidoc VERSION
$(ASCII2MAN) $(ASCII2MAN)
loc: loc:
@ -29,26 +63,30 @@ pep8:
@echo "#############################################" @echo "#############################################"
@echo "# Running PEP8 Compliance Tests" @echo "# Running PEP8 Compliance Tests"
@echo "#############################################" @echo "#############################################"
pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225 lib/ bin/ pep8 -r --ignore=E501,E221,W291,W391,E302,E251,E203,W293,E231,E303,E201,E225,E261 lib/ bin/
pyflakes: pyflakes:
pyflakes lib/ansible/*.py bin/* pyflakes lib/ansible/*.py bin/*
clean: clean:
@echo "Cleaning up distutils stuff" @echo "Cleaning up distutils stuff"
-rm -rf build rm -rf build
-rm -rf dist rm -rf dist
@echo "Cleaning up byte compiled python stuff" @echo "Cleaning up byte compiled python stuff"
find . -regex ".*\.py[co]$$" -delete find . -type f -regex ".*\.py[co]$$" -delete
@echo "Cleaning up editor backup files" @echo "Cleaning up editor backup files"
find . -type f \( -name "*~" -or -name "#*" \) -delete find . -type f \( -name "*~" -or -name "#*" \) -delete
find . -type f \( -name "*.swp" \) -delete find . -type f \( -name "*.swp" \) -delete
@echo "Cleaning up asciidoc to man transformations and results" @echo "Cleaning up asciidoc to man transformations and results"
find ./docs/man -type f -name "*.xml" -delete find ./docs/man -type f -name "*.xml" -delete
find ./docs/man -type f -name "*.asciidoc" -delete
@echo "Cleaning up output from test runs" @echo "Cleaning up output from test runs"
-rm -rf test/test_data rm -rf test/test_data
@echo "Cleaning up RPM building stuff" @echo "Cleaning up RPM building stuff"
-rm -rf MANIFEST rpm-build rm -rf MANIFEST rpm-build
@echo "Cleaning up Debian building stuff"
rm -rf debian
rm -rf deb-build
python: python:
python setup.py build python setup.py build
@ -59,7 +97,7 @@ install:
python setup.py install python setup.py install
sdist: clean sdist: clean
python ./setup.py sdist python setup.py sdist -t MANIFEST.in
rpmcommon: sdist rpmcommon: sdist
@mkdir -p rpm-build @mkdir -p rpm-build
@ -70,9 +108,9 @@ srpm: rpmcommon
--define "_builddir %{_topdir}" \ --define "_builddir %{_topdir}" \
--define "_rpmdir %{_topdir}" \ --define "_rpmdir %{_topdir}" \
--define "_srcrpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \
--define "_specdir %{_topdir}" \ --define "_specdir $(RPMSPECDIR)" \
--define "_sourcedir %{_topdir}" \ --define "_sourcedir %{_topdir}" \
-bs ansible.spec -bs $(RPMSPEC)
@echo "#############################################" @echo "#############################################"
@echo "Ansible SRPM is built:" @echo "Ansible SRPM is built:"
@echo " rpm-build/$(RPMNVR).src.rpm" @echo " rpm-build/$(RPMNVR).src.rpm"
@ -83,13 +121,21 @@ rpm: rpmcommon
--define "_builddir %{_topdir}" \ --define "_builddir %{_topdir}" \
--define "_rpmdir %{_topdir}" \ --define "_rpmdir %{_topdir}" \
--define "_srcrpmdir %{_topdir}" \ --define "_srcrpmdir %{_topdir}" \
--define "_specdir %{_topdir}" \ --define "_specdir $(RPMSPECDIR)" \
--define "_sourcedir %{_topdir}" \ --define "_sourcedir %{_topdir}" \
-ba ansible.spec -ba $(RPMSPEC)
@echo "#############################################" @echo "#############################################"
@echo "Ansible RPM is built:" @echo "Ansible RPM is built:"
@echo " rpm-build/noarch/$(RPMNVR).noarch.rpm" @echo " rpm-build/noarch/$(RPMNVR).noarch.rpm"
@echo "#############################################" @echo "#############################################"
.PHONEY: docs manual clean pep8 debian: sdist
vpath %.asciidoc docs/man/man1 deb: debian
cp -r packaging/debian ./
chmod 755 debian/rules
fakeroot debian/rules clean
fakeroot dh_install
fakeroot debian/rules binary
# for arch or gentoo, read instructions in the appropriate 'packaging' subdirectory directory

1
VERSION Normal file
View file

@ -0,0 +1 @@
0.0.2

View file

@ -22,13 +22,13 @@
import sys import sys
import getpass import getpass
import time import time
from optparse import OptionParser
import ansible.runner import ansible.runner
import ansible.constants as C import ansible.constants as C
from ansible import utils from ansible import utils
from ansible import errors from ansible import errors
from ansible import callbacks from ansible import callbacks
from ansible import inventory
######################################################## ########################################################
@ -47,7 +47,7 @@ class Cli(object):
def parse(self): def parse(self):
''' create an options parser for bin/ansible ''' ''' create an options parser for bin/ansible '''
parser = utils.base_parser(constants=C, port_opts=True, runas_opts=True, async_opts=True, parser = utils.base_parser(constants=C, runas_opts=True, async_opts=True,
output_opts=True, connect_opts=True, usage='%prog <host-pattern> [options]') output_opts=True, connect_opts=True, usage='%prog <host-pattern> [options]')
parser.add_option('-a', '--args', dest='module_args', parser.add_option('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS) help="module arguments", default=C.DEFAULT_MODULE_ARGS)
@ -69,6 +69,13 @@ class Cli(object):
''' use Runner lib to do SSH things ''' ''' use Runner lib to do SSH things '''
pattern = args[0] pattern = args[0]
inventory_manager = inventory.Inventory(options.inventory)
hosts = inventory_manager.list_hosts(pattern)
if len(hosts) == 0:
print >>sys.stderr, "No hosts matched"
sys.exit(1)
sshpass = None sshpass = None
sudopass = None sudopass = None
if options.ask_pass: if options.ask_pass:
@ -78,7 +85,6 @@ class Cli(object):
if options.tree: if options.tree:
utils.prepare_writeable_dir(options.tree) utils.prepare_writeable_dir(options.tree)
if options.seconds: if options.seconds:
print "background launch...\n\n" print "background launch...\n\n"
@ -86,11 +92,11 @@ class Cli(object):
module_name=options.module_name, module_path=options.module_path, module_name=options.module_name, module_path=options.module_path,
module_args=options.module_args, module_args=options.module_args,
remote_user=options.remote_user, remote_pass=sshpass, remote_user=options.remote_user, remote_pass=sshpass,
host_list=options.inventory, timeout=options.timeout, inventory=inventory_manager, timeout=options.timeout,
remote_port=options.remote_port, forks=options.forks, forks=options.forks,
background=options.seconds, pattern=pattern, background=options.seconds, pattern=pattern,
callbacks=self.callbacks, sudo=options.sudo, callbacks=self.callbacks, sudo=options.sudo,
sudo_pass=sudopass, verbose=True, sudo_pass=sudopass,
transport=options.connection, debug=options.debug transport=options.connection, debug=options.debug
) )
return (runner, runner.run()) return (runner, runner.run())
@ -98,14 +104,13 @@ class Cli(object):
# ---------------------------------------------- # ----------------------------------------------
def get_polling_runner(self, old_runner, hosts, jid): def get_polling_runner(self, old_runner, jid):
return ansible.runner.Runner( return ansible.runner.Runner(
module_name='async_status', module_path=old_runner.module_path, module_name='async_status', module_path=old_runner.module_path,
module_args="jid=%s" % jid, remote_user=old_runner.remote_user, module_args="jid=%s" % jid, remote_user=old_runner.remote_user,
remote_pass=old_runner.remote_pass, host_list=hosts, remote_pass=old_runner.remote_pass, inventory=old_runner.inventory,
timeout=old_runner.timeout, forks=old_runner.forks, timeout=old_runner.timeout, forks=old_runner.forks,
remote_port=old_runner.remote_port, pattern='*', pattern='*', callbacks=self.silent_callbacks,
callbacks=self.silent_callbacks, verbose=True,
) )
# ---------------------------------------------- # ----------------------------------------------
@ -138,8 +143,10 @@ class Cli(object):
clock = options.seconds clock = options.seconds
while (clock >= 0): while (clock >= 0):
polling_runner = self.get_polling_runner(runner, poll_hosts, jid) runner.inventory.restrict_to(poll_hosts)
polling_runner = self.get_polling_runner(runner, jid)
poll_results = polling_runner.run() poll_results = polling_runner.run()
runner.inventory.lift_restriction()
if poll_results is None: if poll_results is None:
break break
for (host, host_result) in poll_results['contacted'].iteritems(): for (host, host_result) in poll_results['contacted'].iteritems():

View file

@ -20,7 +20,6 @@
import sys import sys
import getpass import getpass
from optparse import OptionParser
import ansible.playbook import ansible.playbook
import ansible.constants as C import ansible.constants as C
@ -33,9 +32,7 @@ def main(args):
# create parser for CLI options # create parser for CLI options
usage = "%prog playbook.yml" usage = "%prog playbook.yml"
parser = utils.base_parser(constants=C, usage=usage, connect_opts=True) parser = utils.base_parser(constants=C, usage=usage, connect_opts=True, runas_opts=True)
parser.add_option('-e', '--extra-vars', dest='extra_vars',
help='arguments to pass to the inventory script')
parser.add_option('-O', '--override-hosts', dest="override_hosts", default=None, parser.add_option('-O', '--override-hosts', dest="override_hosts", default=None,
help="run playbook against these hosts regardless of inventory settings") help="run playbook against these hosts regardless of inventory settings")
@ -63,13 +60,20 @@ def main(args):
runner_cb = callbacks.PlaybookRunnerCallbacks(stats) runner_cb = callbacks.PlaybookRunnerCallbacks(stats)
pb = ansible.playbook.PlayBook( pb = ansible.playbook.PlayBook(
playbook=playbook,module_path=options.module_path, playbook=playbook,
host_list=options.inventory, override_hosts=override_hosts, module_path=options.module_path,
extra_vars=options.extra_vars, host_list=options.inventory,
forks=options.forks, debug=options.debug, verbose=True, override_hosts=override_hosts,
forks=options.forks,
debug=options.debug,
remote_user=options.remote_user,
remote_pass=sshpass, remote_pass=sshpass,
callbacks=playbook_cb, runner_callbacks=runner_cb, stats=stats, callbacks=playbook_cb,
timeout=options.timeout, transport=options.connection, runner_callbacks=runner_cb,
stats=stats,
timeout=options.timeout,
transport=options.connection,
sudo=options.sudo,
sudo_pass=sudopass sudo_pass=sudopass
) )
try: try:

1
docs/man/.gitignore vendored
View file

@ -1 +1,2 @@
*.xml *.xml
*.asciidoc

View file

@ -1,13 +1,22 @@
'\" t '\" t
.\" Title: ansible-playbook .\" Title: ansible-playbook
.\" Author: [see the "AUTHOR" section] .\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/> .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
.\" Date: 04/13/2012 .\" Date: 04/17/2012
.\" Manual: System administration commands .\" Manual: System administration commands
.\" Source: Ansible 0.0.2 .\" Source: Ansible 0.0.2
.\" Language: English .\" Language: English
.\" .\"
.TH "ANSIBLE\-PLAYBOOK" "1" "04/13/2012" "Ansible 0\&.0\&.2" "System administration commands" .TH "ANSIBLE\-PLAYBOOK" "1" "04/17/2012" "Ansible 0\&.0\&.2" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.\" http://bugs.debian.org/507673
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\" ----------------------------------------------------------------- .\" -----------------------------------------------------------------
.\" * set default formatting .\" * set default formatting
.\" ----------------------------------------------------------------- .\" -----------------------------------------------------------------
@ -77,17 +86,22 @@ Connection timeout to use when trying to talk to hosts, in
\fISECONDS\fR\&. \fISECONDS\fR\&.
.RE .RE
.PP .PP
\fB\-e\fR \fIEXTRA_VARS\fR, \fB\-\-extra_vars=\fR\fIEXTRA_VARS\fR
.RS 4
An additional list of space delimited key=value pairs to pass into the playbook that are not declared in the vars section of the playbook\&.
.RE
.PP
\fB\-O\fR \fIOVERRIDE_HOSTS\fR, \fB\-\-override\-hosts=\fR\fIOVERRIDE_HOSTS\fR \fB\-O\fR \fIOVERRIDE_HOSTS\fR, \fB\-\-override\-hosts=\fR\fIOVERRIDE_HOSTS\fR
.RS 4 .RS 4
Ignore the inventory file and run the playbook against only these hosts\&. "hosts:" line in playbook should be set to Ignore the inventory file and run the playbook against only these hosts\&. "hosts:" line in playbook should be set to
\fIall\fR \fIall\fR
when using this option\&. when using this option\&.
.RE .RE
.PP
\fB\-s\fR, \fB\-\-sudo\fR
.RS 4
Force all plays to use sudo, even if not marked as such\&.
.RE
.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-remote\-user=\fR\fIUSERNAME\fR
.RS 4
Use this remote user name on playbook steps that do not indicate a user name to run as\&.
.RE
.SH "ENVIRONMENT" .SH "ENVIRONMENT"
.sp .sp
The following environment variables may specified\&. The following environment variables may specified\&.

View file

@ -2,7 +2,7 @@ ansible-playbook(1)
=================== ===================
:doctype:manpage :doctype:manpage
:man source: Ansible :man source: Ansible
:man version: 0.0.2 :man version: %VERSION%
:man manual: System administration commands :man manual: System administration commands
NAME NAME
@ -69,18 +69,22 @@ Prompt for the password to use for playbook plays that request sudo access, if a
Connection timeout to use when trying to talk to hosts, in 'SECONDS'. Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
*-e* 'EXTRA_VARS', *--extra_vars=*'EXTRA_VARS'::
An additional list of space delimited key=value pairs to pass into the playbook that are not
declared in the vars section of the playbook.
*-O* 'OVERRIDE_HOSTS', *--override-hosts=*'OVERRIDE_HOSTS':: *-O* 'OVERRIDE_HOSTS', *--override-hosts=*'OVERRIDE_HOSTS'::
Ignore the inventory file and run the playbook against only these hosts. "hosts:" line Ignore the inventory file and run the playbook against only these hosts. "hosts:" line
in playbook should be set to 'all' when using this option. in playbook should be set to 'all' when using this option.
*-s*, *--sudo*::
Force all plays to use sudo, even if not marked as such.
*-u* 'USERNAME', *--remote-user=*'USERNAME'::
Use this remote user name on playbook steps that do not indicate a user name to run as.
ENVIRONMENT ENVIRONMENT
----------- -----------

View file

@ -1,13 +1,22 @@
'\" t '\" t
.\" Title: ansible .\" Title: ansible
.\" Author: [see the "AUTHOR" section] .\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/> .\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
.\" Date: 04/13/2012 .\" Date: 04/17/2012
.\" Manual: System administration commands .\" Manual: System administration commands
.\" Source: Ansible 0.0.2 .\" Source: Ansible 0.0.2
.\" Language: English .\" Language: English
.\" .\"
.TH "ANSIBLE" "1" "04/13/2012" "Ansible 0\&.0\&.2" "System administration commands" .TH "ANSIBLE" "1" "04/17/2012" "Ansible 0\&.0\&.2" "System administration commands"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.\" http://bugs.debian.org/507673
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\" ----------------------------------------------------------------- .\" -----------------------------------------------------------------
.\" * set default formatting .\" * set default formatting
.\" ----------------------------------------------------------------- .\" -----------------------------------------------------------------
@ -25,7 +34,7 @@ ansible \- run a command somewhere else
ansible <host\-pattern> [\-f forks] [\-m module_name] [\-a args] ansible <host\-pattern> [\-f forks] [\-m module_name] [\-a args]
.SH "DESCRIPTION" .SH "DESCRIPTION"
.sp .sp
\fBAnsible\fR is an extra\-simple tool/framework/API for doing \'remote things\' over SSH\&. \fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq over SSH\&.
.SH "ARGUMENTS" .SH "ARGUMENTS"
.PP .PP
\fBhost\-pattern\fR \fBhost\-pattern\fR
@ -63,56 +72,79 @@ to load modules from\&. The default is
\fI/usr/share/ansible\fR\&. \fI/usr/share/ansible\fR\&.
.RE .RE
.PP .PP
\fB\-a\fR \'\fIARGUMENTS\fR\', \fB\-\-args=\fR\'\fIARGUMENTS\fR\' \fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq
.RS 4 .RS 4
The The
\fIARGUMENTS\fR \fIARGUMENTS\fR
to pass to the module\&. to pass to the module\&.
.RE .RE
.sp .PP
\fB\-D\fR, \fB\-\-debug\fR \fB\-D\fR, \fB\-\-debug\fR
.sp .RS 4
Print any messages the remote module sends to standard error to the console Print any messages the remote module sends to standard error to the console
.sp .RE
.PP
\fB\-k\fR, \fB\-\-ask\-pass\fR \fB\-k\fR, \fB\-\-ask\-pass\fR
.sp .RS 4
Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&. Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&.
.sp .RE
.PP
\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR \fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR
.sp .RS 4
Prompt for the password to use with \-\-sudo, if any Prompt for the password to use with \-\-sudo, if any
.sp .RE
.PP
\fB\-o\fR, \fB\-\-one\-line\fR \fB\-o\fR, \fB\-\-one\-line\fR
.sp .RS 4
Try to output everything on one line\&. Try to output everything on one line\&.
.sp .RE
.PP
\fB\-s\fR, \fB\-\-sudo\fR \fB\-s\fR, \fB\-\-sudo\fR
.sp .RS 4
Run the command as the user given by \-u and sudo to root\&. Run the command as the user given by \-u and sudo to root\&.
.sp .RE
.PP
\fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR \fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR
.sp .RS 4
Save contents in this output \fIDIRECTORY\fR, with the results saved in a file named after each host\&. Save contents in this output
.sp \fIDIRECTORY\fR, with the results saved in a file named after each host\&.
.RE
.PP
\fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR \fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR
.sp .RS 4
Connection timeout to use when trying to talk to hosts, in \fISECONDS\fR\&. Connection timeout to use when trying to talk to hosts, in
.sp \fISECONDS\fR\&.
.RE
.PP
\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR \fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR
.sp .RS 4
Run commands in the background, killing the task after \fINUM\fR seconds\&. Run commands in the background, killing the task after
.sp \fINUM\fR
seconds\&.
.RE
.PP
\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR \fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR
.sp .RS 4
Poll a background job every \fINUM\fR seconds\&. Requires \fB\-B\fR\&. Poll a background job every
.sp \fINUM\fR
seconds\&. Requires
\fB\-B\fR\&.
.RE
.PP
\fB\-u\fR \fIUSERNAME\fR, \fB\-\-remote\-user=\fR\fIUSERNAME\fR \fB\-u\fR \fIUSERNAME\fR, \fB\-\-remote\-user=\fR\fIUSERNAME\fR
.sp .RS 4
Use this remote \fIUSERNAME\fR instead of root\&. Use this remote
.sp \fIUSERNAME\fR
instead of root\&.
.RE
.PP
\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR
.sp .RS 4
Connection type to use\&. Possible options are \fIparamiko\fR (SSH) and \fIlocal\fR\&. Local is mostly useful for crontab or kickstarts\&. Connection type to use\&. Possible options are
\fIparamiko\fR
(SSH) and
\fIlocal\fR\&. Local is mostly useful for crontab or kickstarts\&.
.RE
.SH "INVENTORY" .SH "INVENTORY"
.sp .sp
Ansible stores the hosts it can potentially operate on in an inventory file\&. The syntax is one host per line\&. Groups headers are allowed and are included on their own line, enclosed in square brackets\&. Ansible stores the hosts it can potentially operate on in an inventory file\&. The syntax is one host per line\&. Groups headers are allowed and are included on their own line, enclosed in square brackets\&.

View file

@ -2,7 +2,7 @@ ansible(1)
========= =========
:doctype:manpage :doctype:manpage
:man source: Ansible :man source: Ansible
:man version: 0.0.2 :man version: %VERSION%
:man manual: System administration commands :man manual: System administration commands
NAME NAME
@ -60,48 +60,48 @@ The 'DIRECTORY' to load modules from. The default is '/usr/share/ansible'.
The 'ARGUMENTS' to pass to the module. The 'ARGUMENTS' to pass to the module.
*-D*, *--debug* *-D*, *--debug*::
Print any messages the remote module sends to standard error to the console Print any messages the remote module sends to standard error to the console
*-k*, *--ask-pass* *-k*, *--ask-pass*::
Prompt for the SSH password instead of assuming key-based authentication with ssh-agent. Prompt for the SSH password instead of assuming key-based authentication with ssh-agent.
*-K*, *--ask-sudo-pass* *-K*, *--ask-sudo-pass*::
Prompt for the password to use with --sudo, if any Prompt for the password to use with --sudo, if any
*-o*, *--one-line* *-o*, *--one-line*::
Try to output everything on one line. Try to output everything on one line.
*-s*, *--sudo* *-s*, *--sudo*::
Run the command as the user given by -u and sudo to root. Run the command as the user given by -u and sudo to root.
*-t* 'DIRECTORY', *--tree=*'DIRECTORY' *-t* 'DIRECTORY', *--tree=*'DIRECTORY'::
Save contents in this output 'DIRECTORY', with the results saved in a Save contents in this output 'DIRECTORY', with the results saved in a
file named after each host. file named after each host.
*-T* 'SECONDS', *--timeout=*'SECONDS' *-T* 'SECONDS', *--timeout=*'SECONDS'::
Connection timeout to use when trying to talk to hosts, in 'SECONDS'. Connection timeout to use when trying to talk to hosts, in 'SECONDS'.
*-B* 'NUM', *--background=*'NUM' *-B* 'NUM', *--background=*'NUM'::
Run commands in the background, killing the task after 'NUM' seconds. Run commands in the background, killing the task after 'NUM' seconds.
*-P* 'NUM', *--poll=*'NUM' *-P* 'NUM', *--poll=*'NUM'::
Poll a background job every 'NUM' seconds. Requires *-B*. Poll a background job every 'NUM' seconds. Requires *-B*.
*-u* 'USERNAME', *--remote-user=*'USERNAME' *-u* 'USERNAME', *--remote-user=*'USERNAME'::
Use this remote 'USERNAME' instead of root. Use this remote 'USERNAME' instead of root.
*-c* 'CONNECTION', *--connection=*'CONNECTION' *-c* 'CONNECTION', *--connection=*'CONNECTION'::
Connection type to use. Possible options are 'paramiko' (SSH) and 'local'. Connection type to use. Possible options are 'paramiko' (SSH) and 'local'.
Local is mostly useful for crontab or kickstarts. Local is mostly useful for crontab or kickstarts.

View file

@ -0,0 +1,18 @@
---
# This is a demo of how to manage the selinux context using the file module
- hosts: test
user: root
tasks:
- name: Change setype of /etc/exports to non-default value
action: file path=/etc/exports setype=etc_t
- name: Change seuser of /etc/exports to non-default value
action: file path=/etc/exports seuser=unconfined_u
- name: Set selinux context back to default value
action: file path=/etc/exports context=default
- name: Create empty file
action: command /bin/touch /tmp/foo
- name: Change setype of /tmp/foo
action: file path=/tmp/foo setype=default_t
- name: Try to set secontext to default, but this will fail
because of the lack of a default in the policy
action: file path=/tmp/foo context=default

View file

@ -6,6 +6,3 @@ To use it from the root of a checkout:
$ . ./hacking/env-setup $ . ./hacking/env-setup
Note the space between the '.' and the './' Note the space between the '.' and the './'
Man pages will not load until you run 'make docs' from the root of the
checkout.

View file

@ -4,14 +4,17 @@
PREFIX_PYTHONPATH="$PWD/lib" PREFIX_PYTHONPATH="$PWD/lib"
PREFIX_PATH="$PWD/bin" PREFIX_PATH="$PWD/bin"
PREFIX_MANPATH="$PWD/docs/man"
export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH
export PATH=$PREFIX_PATH:$PATH export PATH=$PREFIX_PATH:$PATH
export ANSIBLE_LIBRARY="$PWD/library" export ANSIBLE_LIBRARY="$PWD/library"
export MANPATH=$PREFIX_MANPATH:$MANPATH
echo "PATH=$PATH" echo "PATH=$PATH"
echo "PYTHONPATH=$PYTHONPATH" echo "PYTHONPATH=$PYTHONPATH"
echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
echo "MANPATH=$MANPATH"
echo "reminder: specify your host file with -i" echo "Reminder: specify your host file with -i"
echo "done." echo "Done."

View file

@ -30,7 +30,7 @@ import sys
import os import os
import subprocess import subprocess
import traceback import traceback
import ansible.utils from ansible import utils
try: try:
import json import json
@ -70,7 +70,7 @@ try:
print "***********************************" print "***********************************"
print "RAW OUTPUT" print "RAW OUTPUT"
print out print out
results = ansible.utils.parse_json(out) results = utils.parse_json(out)
except: except:
print "***********************************" print "***********************************"
@ -82,7 +82,7 @@ except:
print "***********************************" print "***********************************"
print "PARSED OUTPUT" print "PARSED OUTPUT"
print results print utils.bigjson(results)
sys.exit(0) sys.exit(0)

View file

@ -151,7 +151,7 @@ class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
print "failed: [%s] => %s => %s\n" % (host, invocation, utils.smjson(results)) print "failed: [%s] => %s => %s\n" % (host, invocation, utils.smjson(results))
def on_ok(self, host, host_result): def on_ok(self, host, host_result):
invocation = host_result.get('invocation',None) invocation = host_result.get('invocation','')
if invocation.startswith('async_status'): if invocation.startswith('async_status'):
pass pass
elif not invocation or invocation.startswith('setup '): elif not invocation or invocation.startswith('setup '):

View file

@ -45,12 +45,12 @@ class Connection(object):
self.runner = runner self.runner = runner
self.transport = transport self.transport = transport
def connect(self, host): def connect(self, host, port=None):
conn = None conn = None
if self.transport == 'local' and self._LOCALHOSTRE.search(host): if self.transport == 'local' and self._LOCALHOSTRE.search(host):
conn = LocalConnection(self.runner, host) conn = LocalConnection(self.runner, host, None)
elif self.transport == 'paramiko': elif self.transport == 'paramiko':
conn = ParamikoConnection(self.runner, host) conn = ParamikoConnection(self.runner, host, port)
if conn is None: if conn is None:
raise Exception("unsupported connection type") raise Exception("unsupported connection type")
return conn.connect() return conn.connect()
@ -64,10 +64,13 @@ class Connection(object):
class ParamikoConnection(object): class ParamikoConnection(object):
''' SSH based connections with Paramiko ''' ''' SSH based connections with Paramiko '''
def __init__(self, runner, host): def __init__(self, runner, host, port=None):
self.ssh = None self.ssh = None
self.runner = runner self.runner = runner
self.host = host self.host = host
self.port = port
if port is None:
self.port = self.runner.remote_port
def _get_conn(self): def _get_conn(self):
ssh = paramiko.SSHClient() ssh = paramiko.SSHClient()
@ -75,9 +78,13 @@ class ParamikoConnection(object):
try: try:
ssh.connect( ssh.connect(
self.host, username=self.runner.remote_user, self.host,
allow_agent=True, look_for_keys=True, password=self.runner.remote_pass, username=self.runner.remote_user,
timeout=self.runner.timeout, port=self.runner.remote_port allow_agent=True,
look_for_keys=True,
password=self.runner.remote_pass,
timeout=self.runner.timeout,
port=self.port
) )
except Exception, e: except Exception, e:
if str(e).find("PID check failed") != -1: if str(e).find("PID check failed") != -1:
@ -183,7 +190,7 @@ class LocalConnection(object):
self.runner = runner self.runner = runner
self.host = host self.host = host
def connect(self): def connect(self, port=None):
''' connect to the local host; nothing to do here ''' ''' connect to the local host; nothing to do here '''
return self return self

292
lib/ansible/inventory.py Normal file
View file

@ -0,0 +1,292 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import fnmatch
import os
import subprocess
import constants as C
from ansible import errors
from ansible import utils
class Inventory(object):
    """ Host inventory for ansible.

    The inventory is either a simple text file with systems and [groups] of
    systems, or a script that will be called with --list or --host.
    """

    def __init__(self, host_list=C.DEFAULT_HOST_LIST):
        """
        host_list: either an explicit list of hostnames, or a path to an
        inventory source. If the path is executable it is treated as an
        external inventory script (invoked with --list / --host), otherwise
        it is parsed as a plain INI-style or YAML inventory file.
        """
        self._restriction = None
        # per-host variables discovered while parsing: host name -> dict
        self._variables = {}

        if type(host_list) == list:
            # explicit host list: one implicit "ungrouped" group, no file
            self.host_list = host_list
            self.groups = dict(ungrouped=host_list)
            self._is_script = False
            return

        inventory_file = os.path.expanduser(host_list)
        if not os.path.exists(inventory_file):
            raise errors.AnsibleFileNotFound("inventory file not found: %s" % host_list)

        self.inventory_file = os.path.abspath(inventory_file)

        if os.access(self.inventory_file, os.X_OK):
            # executable file => external inventory script
            self.host_list, self.groups = self._parse_from_script()
            self._is_script = True
        else:
            self.host_list, self.groups = self._parse_from_file()
            self._is_script = False

    # *****************************************************
    # Public API

    def list_hosts(self, pattern="all"):
        """ Return a list of hosts [matching the pattern] """
        if self._restriction is None:
            host_list = self.host_list
        else:
            host_list = [ h for h in self.host_list if h in self._restriction ]
        return [ h for h in host_list if self._matches(h, pattern) ]

    def restrict_to(self, restriction):
        """ Restrict list operations to the hosts given in restriction """
        if type(restriction) != list:
            restriction = [ restriction ]
        self._restriction = restriction

    def lift_restriction(self):
        """ Do not restrict list operations """
        self._restriction = None

    def get_variables(self, host):
        """ Return the variables associated with this host. """
        if host in self._variables:
            # return a copy so callers cannot mutate our cache
            return self._variables[host].copy()
        if not self._is_script:
            return {}
        return self._get_variables_from_script(host)

    # *****************************************************

    def _parse_from_file(self):
        ''' parse a textual host file, returns (hosts, groups) '''

        results = []
        groups = dict(ungrouped=[])
        lines = file(self.inventory_file).read().split("\n")
        # a line consisting solely of "---" marks a YAML inventory
        if "---" in lines:
            return self._parse_yaml()
        group_name = 'ungrouped'

        for item in lines:
            item = item.lstrip().rstrip()
            if item.startswith("#"):
                # ignore commented out lines
                pass
            elif item.startswith("["):
                # looks like a group
                group_name = item.replace("[","").replace("]","").lstrip().rstrip()
                groups[group_name] = []
            elif item != "":
                # looks like a regular host
                if ":" in item:
                    # a port was specified
                    item, port = item.split(":")
                    try:
                        port = int(port)
                    except ValueError:
                        raise errors.AnsibleError("SSH port for %s in inventory (%s) should be numerical."%(item, port))
                    self._set_variable(item, "ansible_ssh_port", port)
                groups[group_name].append(item)
                if not item in results:
                    results.append(item)

        return (results, groups)

    # *****************************************************

    def _parse_from_script(self):
        ''' evaluate a script that returns list of hosts by groups '''

        results = []
        groups = dict(ungrouped=[])

        cmd = [self.inventory_file, '--list']
        cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
        out, err = cmd.communicate()
        rc = cmd.returncode
        if rc:
            # BUGFIX: the format arguments must be a parenthesized tuple.
            # The original "%s: %s" % self.inventory_file, err raised
            # TypeError and passed err as a stray second argument.
            raise errors.AnsibleError("%s: %s" % (self.inventory_file, err))

        try:
            groups = utils.json_loads(out)
        except:
            raise errors.AnsibleError("invalid JSON response from script: %s" % self.inventory_file)

        for (groupname, hostlist) in groups.iteritems():
            for host in hostlist:
                if host not in results:
                    results.append(host)

        return (results, groups)

    # *****************************************************

    def _parse_yaml(self):
        """ Load the inventory from a yaml file.

        returns hosts and groups"""

        data = utils.parse_yaml_from_file(self.inventory_file)

        if type(data) != list:
            raise errors.AnsibleError("YAML inventory should be a list.")

        hosts = []
        groups = {}
        ungrouped = []

        for item in data:
            if type(item) == dict:
                if "group" in item:
                    group_name = item["group"]

                    group_vars = []
                    if "vars" in item:
                        group_vars = item["vars"]

                    group_hosts = []
                    if "hosts" in item:
                        for host in item["hosts"]:
                            host_name = self._parse_yaml_host(host, group_vars)
                            group_hosts.append(host_name)

                    groups[group_name] = group_hosts
                    hosts.extend(group_hosts)

                elif "host" in item:
                    host_name = self._parse_yaml_host(item)
                    hosts.append(host_name)
                    ungrouped.append(host_name)
            else:
                # bare string entry: a top-level, ungrouped host
                host_name = self._parse_yaml_host(item)
                hosts.append(host_name)
                ungrouped.append(host_name)

        # filter duplicate hosts
        output_hosts = []
        for host in hosts:
            if host not in output_hosts:
                output_hosts.append(host)

        if len(ungrouped) > 0 :
            # hosts can be defined top-level, but also in a group
            really_ungrouped = []
            for host in ungrouped:
                already_grouped = False
                for name, group_hosts in groups.items():
                    if host in group_hosts:
                        already_grouped = True
                if not already_grouped:
                    really_ungrouped.append(host)
            groups["ungrouped"] = really_ungrouped

        return output_hosts, groups

    def _parse_yaml_host(self, item, variables=None):
        ''' parse a single host entry from a YAML inventory.

        item: either a plain host name string or a dict with "host"
        (and optionally "vars") keys.
        variables: list-of-one-key-dicts or dict of variables inherited
        from the enclosing group; stored per host via _set_variable.
        Returns the host name. '''

        # avoid a mutable default argument (was variables=[])
        if variables is None:
            variables = []

        def set_variables(host, variables):
            if type(variables) == list:
                for variable in variables:
                    if len(variable) != 1:
                        raise errors.AnsibleError("Only one item expected in %s"%(variable))
                    k, v = variable.items()[0]
                    self._set_variable(host, k, v)
            elif type(variables) == dict:
                for k, v in variables.iteritems():
                    self._set_variable(host, k, v)

        if type(item) in [str, unicode]:
            set_variables(item, variables)
            return item
        elif type(item) == dict:
            if "host" in item:
                host_name = item["host"]
                # group vars first, so host-level vars can override them
                set_variables(host_name, variables)

                if "vars" in item:
                    set_variables(host_name, item["vars"])

                return host_name
        else:
            raise errors.AnsibleError("Unknown item in inventory: %s"%(item))

    def _get_variables_from_script(self, host):
        ''' support per system variabes from external variable scripts, see web docs '''

        cmd = [self.inventory_file, '--host', host]

        cmd = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False
        )
        out, err = cmd.communicate()

        variables = {}
        try:
            variables = utils.json_loads(out)
        except:
            raise errors.AnsibleError("%s returned invalid result when called with hostname %s" % (
                self.inventory_file,
                host
            ))
        return variables

    def _set_variable(self, host, key, value):
        # record a single per-host variable, creating the host's dict lazily
        if not host in self._variables:
            self._variables[host] = {}
        self._variables[host][key] = value

    def _matches(self, host_name, pattern):
        ''' returns if a hostname is matched by the pattern '''
        # a pattern is in fnmatch format but more than one pattern
        # can be strung together with semicolons. ex:
        #   atlanta-web*.example.com;dc-web*.example.com
        if host_name == '':
            return False
        pattern = pattern.replace(";",":")
        subpatterns = pattern.split(":")
        for subpattern in subpatterns:
            if subpattern == 'all':
                return True
            if fnmatch.fnmatch(host_name, subpattern):
                return True
            elif subpattern in self.groups:
                if host_name in self.groups[subpattern]:
                    return True
        return False

View file

@ -17,11 +17,11 @@
############################################# #############################################
import ansible.inventory
import ansible.runner import ansible.runner
import ansible.constants as C import ansible.constants as C
from ansible import utils from ansible import utils
from ansible import errors from ansible import errors
import shlex
import os import os
import time import time
@ -58,17 +58,33 @@ class PlayBook(object):
remote_port = C.DEFAULT_REMOTE_PORT, remote_port = C.DEFAULT_REMOTE_PORT,
transport = C.DEFAULT_TRANSPORT, transport = C.DEFAULT_TRANSPORT,
override_hosts = None, override_hosts = None,
extra_vars = None,
debug = False, debug = False,
verbose = False,
callbacks = None, callbacks = None,
runner_callbacks = None, runner_callbacks = None,
stats = None): stats = None,
sudo = False):
"""
playbook: path to a playbook file
host_list: path to a file like /etc/ansible/hosts
module_path: path to ansible modules, like /usr/share/ansible/
forks: desired level of paralellism
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
sudo_pass: if sudo==True, and a password is required, this is the sudo password
remote_port: default remote port to use if not specified with the host or play
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
override_hosts: skip the inventory file, just talk to these hosts
callbacks output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregrate data about events occuring to each host
sudo: if not specified per play, requests all plays use sudo mode
"""
if playbook is None or callbacks is None or runner_callbacks is None or stats is None: if playbook is None or callbacks is None or runner_callbacks is None or stats is None:
raise Exception('missing required arguments') raise Exception('missing required arguments')
self.host_list = host_list
self.module_path = module_path self.module_path = module_path
self.forks = forks self.forks = forks
self.timeout = timeout self.timeout = timeout
@ -77,20 +93,23 @@ class PlayBook(object):
self.remote_port = remote_port self.remote_port = remote_port
self.transport = transport self.transport = transport
self.debug = debug self.debug = debug
self.verbose = verbose
self.callbacks = callbacks self.callbacks = callbacks
self.runner_callbacks = runner_callbacks self.runner_callbacks = runner_callbacks
self.override_hosts = override_hosts self.override_hosts = override_hosts
self.extra_vars = extra_vars
self.stats = stats self.stats = stats
self.sudo = sudo
self.sudo_pass = sudo_pass self.sudo_pass = sudo_pass
self.basedir = os.path.dirname(playbook) self.basedir = os.path.dirname(playbook)
self.playbook = self._parse_playbook(playbook) self.playbook = self._parse_playbook(playbook)
self.host_list, self.groups = ansible.runner.Runner.parse_hosts( if override_hosts is not None:
host_list, override_hosts=self.override_hosts, extra_vars=self.extra_vars) if type(override_hosts) != list:
raise errors.AnsibleError("override hosts must be a list")
self.inventory = ansible.inventory.Inventory(override_hosts)
else:
self.inventory = ansible.inventory.Inventory(host_list)
# ***************************************************** # *****************************************************
def _get_vars(self, play, dirname): def _get_vars(self, play, dirname):
@ -98,8 +117,18 @@ class PlayBook(object):
if play.get('vars') is None: if play.get('vars') is None:
play['vars'] = {} play['vars'] = {}
vars = play['vars'] vars = play['vars']
if type(vars) != dict: if type(vars) not in [dict, list]:
raise errors.AnsibleError("'vars' section must contain only key/value pairs") raise errors.AnsibleError("'vars' section must contain only key/value pairs")
# translate a list of vars into a dict
if type(vars) == list:
varlist = vars
vars = {}
for item in varlist:
k, v = item.items()[0]
vars[k] = v
play['vars'] = vars
vars_prompt = play.get('vars_prompt', {}) vars_prompt = play.get('vars_prompt', {})
if type(vars_prompt) != dict: if type(vars_prompt) != dict:
raise errors.AnsibleError("'vars_prompt' section must contain only key/value pairs") raise errors.AnsibleError("'vars_prompt' section must contain only key/value pairs")
@ -178,10 +207,10 @@ class PlayBook(object):
if action is None: if action is None:
raise errors.AnsibleError('action is required') raise errors.AnsibleError('action is required')
produced_task = task.copy() produced_task = task.copy()
produced_task['action'] = utils.template(action, dict(item=item)) produced_task['action'] = utils.template(action, dict(item=item), SETUP_CACHE)
produced_task['name'] = utils.template(name, dict(item=item)) produced_task['name'] = utils.template(name, dict(item=item), SETUP_CACHE)
if only_if: if only_if:
produced_task['only_if'] = utils.template(only_if, dict(item=item)) produced_task['only_if'] = utils.template(only_if, dict(item=item), SETUP_CACHE)
new_tasks2.append(produced_task) new_tasks2.append(produced_task)
else: else:
new_tasks2.append(task) new_tasks2.append(task)
@ -233,7 +262,6 @@ class PlayBook(object):
def _async_poll(self, runner, hosts, async_seconds, async_poll_interval, only_if): def _async_poll(self, runner, hosts, async_seconds, async_poll_interval, only_if):
''' launch an async job, if poll_interval is set, wait for completion ''' ''' launch an async job, if poll_interval is set, wait for completion '''
runner.host_list = hosts
runner.background = async_seconds runner.background = async_seconds
results = runner.run() results = runner.run()
self.stats.compute(results, poll=True) self.stats.compute(results, poll=True)
@ -257,7 +285,7 @@ class PlayBook(object):
return results return results
clock = async_seconds clock = async_seconds
runner.host_list = self.hosts_to_poll(results) host_list = self.hosts_to_poll(results)
poll_results = results poll_results = results
while (clock >= 0): while (clock >= 0):
@ -267,11 +295,13 @@ class PlayBook(object):
runner.module_name = 'async_status' runner.module_name = 'async_status'
runner.background = 0 runner.background = 0
runner.pattern = '*' runner.pattern = '*'
self.inventory.restrict_to(host_list)
poll_results = runner.run() poll_results = runner.run()
self.stats.compute(poll_results, poll=True) self.stats.compute(poll_results, poll=True)
runner.host_list = self.hosts_to_poll(poll_results) host_list = self.hosts_to_poll(poll_results)
self.inventory.lift_restriction()
if len(runner.host_list) == 0: if len(host_list) == 0:
break break
if poll_results is None: if poll_results is None:
break break
@ -298,33 +328,40 @@ class PlayBook(object):
# ***************************************************** # *****************************************************
def _run_module(self, pattern, host_list, module, args, vars, remote_user, def _run_module(self, pattern, module, args, vars, remote_user,
async_seconds, async_poll_interval, only_if, sudo, transport): async_seconds, async_poll_interval, only_if, sudo, transport, port):
''' run a particular module step in a playbook ''' ''' run a particular module step in a playbook '''
hosts = [ h for h in host_list if (h not in self.stats.failures) and (h not in self.stats.dark)] hosts = [ h for h in self.inventory.list_hosts() if (h not in self.stats.failures) and (h not in self.stats.dark)]
self.inventory.restrict_to(hosts)
if port is None:
port=self.remote_port
runner = ansible.runner.Runner( runner = ansible.runner.Runner(
pattern=pattern, groups=self.groups, module_name=module, pattern=pattern, inventory=self.inventory, module_name=module,
module_args=args, host_list=hosts, forks=self.forks, module_args=args, forks=self.forks,
remote_pass=self.remote_pass, module_path=self.module_path, remote_pass=self.remote_pass, module_path=self.module_path,
timeout=self.timeout, remote_user=remote_user, timeout=self.timeout, remote_user=remote_user,
remote_port=self.remote_port, module_vars=vars, remote_port=port, module_vars=vars,
setup_cache=SETUP_CACHE, basedir=self.basedir, setup_cache=SETUP_CACHE, basedir=self.basedir,
conditional=only_if, callbacks=self.runner_callbacks, conditional=only_if, callbacks=self.runner_callbacks,
extra_vars=self.extra_vars, debug=self.debug, sudo=sudo, debug=self.debug, sudo=sudo,
transport=transport, sudo_pass=self.sudo_pass, is_playbook=True transport=transport, sudo_pass=self.sudo_pass, is_playbook=True
) )
if async_seconds == 0: if async_seconds == 0:
return runner.run() results = runner.run()
else: else:
return self._async_poll(runner, hosts, async_seconds, async_poll_interval, only_if) results = self._async_poll(runner, hosts, async_seconds, async_poll_interval, only_if)
self.inventory.lift_restriction()
return results
# ***************************************************** # *****************************************************
def _run_task(self, pattern=None, host_list=None, task=None, def _run_task(self, pattern=None, task=None,
remote_user=None, handlers=None, conditional=False, sudo=False, transport=None): remote_user=None, handlers=None, conditional=False, sudo=False, transport=None, port=None):
''' run a single task in the playbook and recursively run any subtasks. ''' ''' run a single task in the playbook and recursively run any subtasks. '''
# load the module name and parameters from the task entry # load the module name and parameters from the task entry
@ -340,7 +377,9 @@ class PlayBook(object):
tokens = action.split(None, 1) tokens = action.split(None, 1)
module_name = tokens[0] module_name = tokens[0]
module_args = tokens[1] module_args = ''
if len(tokens) > 1:
module_args = tokens[1]
# include task specific vars # include task specific vars
module_vars = task.get('vars') module_vars = task.get('vars')
@ -354,9 +393,9 @@ class PlayBook(object):
# load up an appropriate ansible runner to # load up an appropriate ansible runner to
# run the task in parallel # run the task in parallel
results = self._run_module(pattern, host_list, module_name, results = self._run_module(pattern, module_name,
module_args, module_vars, remote_user, async_seconds, module_args, module_vars, remote_user, async_seconds,
async_poll_interval, only_if, sudo, transport) async_poll_interval, only_if, sudo, transport, port)
self.stats.compute(results) self.stats.compute(results)
@ -406,7 +445,7 @@ class PlayBook(object):
# ***************************************************** # *****************************************************
def _do_conditional_imports(self, vars_files, host_list): def _do_conditional_imports(self, vars_files):
''' handle the vars_files section, which can contain variables ''' ''' handle the vars_files section, which can contain variables '''
# FIXME: save parsed variable results in memory to avoid excessive re-reading/parsing # FIXME: save parsed variable results in memory to avoid excessive re-reading/parsing
@ -417,7 +456,7 @@ class PlayBook(object):
if type(vars_files) != list: if type(vars_files) != list:
raise errors.AnsibleError("vars_files must be a list") raise errors.AnsibleError("vars_files must be a list")
for host in host_list: for host in self.inventory.list_hosts():
cache_vars = SETUP_CACHE.get(host,{}) cache_vars = SETUP_CACHE.get(host,{})
SETUP_CACHE[host] = cache_vars SETUP_CACHE[host] = cache_vars
for filename in vars_files: for filename in vars_files:
@ -426,7 +465,7 @@ class PlayBook(object):
found = False found = False
sequence = [] sequence = []
for real_filename in filename: for real_filename in filename:
filename2 = utils.path_dwim(self.basedir, utils.template(real_filename, cache_vars)) filename2 = utils.path_dwim(self.basedir, utils.template(real_filename, cache_vars, SETUP_CACHE))
sequence.append(filename2) sequence.append(filename2)
if os.path.exists(filename2): if os.path.exists(filename2):
found = True found = True
@ -442,7 +481,7 @@ class PlayBook(object):
) )
else: else:
filename2 = utils.path_dwim(self.basedir, utils.template(filename, cache_vars)) filename2 = utils.path_dwim(self.basedir, utils.template(filename, cache_vars, SETUP_CACHE))
if not os.path.exists(filename2): if not os.path.exists(filename2):
raise errors.AnsibleError("no file matched for vars_file import: %s" % filename2) raise errors.AnsibleError("no file matched for vars_file import: %s" % filename2)
data = utils.parse_yaml_from_file(filename2) data = utils.parse_yaml_from_file(filename2)
@ -460,25 +499,29 @@ class PlayBook(object):
if vars_files is not None: if vars_files is not None:
self.callbacks.on_setup_secondary() self.callbacks.on_setup_secondary()
self._do_conditional_imports(vars_files, self.host_list) self._do_conditional_imports(vars_files)
else: else:
self.callbacks.on_setup_primary() self.callbacks.on_setup_primary()
host_list = [ h for h in self.host_list if not (h in self.stats.failures or h in self.stats.dark) ] host_list = [ h for h in self.inventory.list_hosts(pattern)
if not (h in self.stats.failures or h in self.stats.dark) ]
self.inventory.restrict_to(host_list)
# push any variables down to the system # push any variables down to the system
setup_results = ansible.runner.Runner( setup_results = ansible.runner.Runner(
pattern=pattern, groups=self.groups, module_name='setup', pattern=pattern, module_name='setup',
module_args=vars, host_list=host_list, module_args=vars, inventory=self.inventory,
forks=self.forks, module_path=self.module_path, forks=self.forks, module_path=self.module_path,
timeout=self.timeout, remote_user=user, timeout=self.timeout, remote_user=user,
remote_pass=self.remote_pass, remote_port=self.remote_port, remote_pass=self.remote_pass, remote_port=port,
setup_cache=SETUP_CACHE, setup_cache=SETUP_CACHE,
callbacks=self.runner_callbacks, sudo=sudo, debug=self.debug, callbacks=self.runner_callbacks, sudo=sudo, debug=self.debug,
transport=transport, sudo_pass=self.sudo_pass, is_playbook=True transport=transport, sudo_pass=self.sudo_pass, is_playbook=True
).run() ).run()
self.stats.compute(setup_results, setup=True) self.stats.compute(setup_results, setup=True)
self.inventory.lift_restriction()
# now for each result, load into the setup cache so we can # now for each result, load into the setup cache so we can
# let runner template out future commands # let runner template out future commands
setup_ok = setup_results.get('contacted', {}) setup_ok = setup_results.get('contacted', {})
@ -487,15 +530,6 @@ class PlayBook(object):
for (host, result) in setup_ok.iteritems(): for (host, result) in setup_ok.iteritems():
SETUP_CACHE[host] = result SETUP_CACHE[host] = result
if self.extra_vars:
extra_vars = utils.parse_kv(self.extra_vars)
for h in self.host_list:
try:
SETUP_CACHE[h].update(extra_vars)
except:
SETUP_CACHE[h] = extra_vars
return host_list
# ***************************************************** # *****************************************************
def _run_play(self, pg): def _run_play(self, pg):
@ -514,7 +548,7 @@ class PlayBook(object):
handlers = pg.get('handlers', []) handlers = pg.get('handlers', [])
user = pg.get('user', self.remote_user) user = pg.get('user', self.remote_user)
port = pg.get('port', self.remote_port) port = pg.get('port', self.remote_port)
sudo = pg.get('sudo', False) sudo = pg.get('sudo', self.sudo)
transport = pg.get('connection', self.transport) transport = pg.get('connection', self.transport)
self.callbacks.on_play_start(pattern) self.callbacks.on_play_start(pattern)
@ -530,12 +564,12 @@ class PlayBook(object):
for task in tasks: for task in tasks:
self._run_task( self._run_task(
pattern=pattern, pattern=pattern,
host_list=self.host_list,
task=task, task=task,
handlers=handlers, handlers=handlers,
remote_user=user, remote_user=user,
sudo=sudo, sudo=sudo,
transport=transport transport=transport,
port=port
) )
# handlers only run on certain nodes, they are flagged by _flag_handlers # handlers only run on certain nodes, they are flagged by _flag_handlers
@ -547,16 +581,18 @@ class PlayBook(object):
for task in handlers: for task in handlers:
triggered_by = task.get('run', None) triggered_by = task.get('run', None)
if type(triggered_by) == list: if type(triggered_by) == list:
self.inventory.restrict_to(triggered_by)
self._run_task( self._run_task(
pattern=pattern, pattern=pattern,
task=task, task=task,
handlers=[], handlers=[],
host_list=triggered_by,
conditional=True, conditional=True,
remote_user=user, remote_user=user,
sudo=sudo, sudo=sudo,
transport=transport transport=transport,
port=port
) )
self.inventory.lift_restriction()
# end of execution for this particular pattern. Multiple patterns # end of execution for this particular pattern. Multiple patterns
# can be in a single playbook file # can be in a single playbook file

View file

@ -18,7 +18,6 @@
################################################ ################################################
import fnmatch
import multiprocessing import multiprocessing
import signal import signal
import os import os
@ -27,11 +26,11 @@ import Queue
import random import random
import traceback import traceback
import tempfile import tempfile
import subprocess import base64
import getpass
import ansible.constants as C import ansible.constants as C
import ansible.connection import ansible.connection
import ansible.inventory
from ansible import utils from ansible import utils
from ansible import errors from ansible import errors
from ansible import callbacks as ans_callbacks from ansible import callbacks as ans_callbacks
@ -68,17 +67,41 @@ def _executor_hook(job_queue, result_queue):
class Runner(object): class Runner(object):
_external_variable_script = None def __init__(self,
host_list=C.DEFAULT_HOST_LIST, module_path=C.DEFAULT_MODULE_PATH,
def __init__(self, host_list=C.DEFAULT_HOST_LIST, module_path=C.DEFAULT_MODULE_PATH,
module_name=C.DEFAULT_MODULE_NAME, module_args=C.DEFAULT_MODULE_ARGS, module_name=C.DEFAULT_MODULE_NAME, module_args=C.DEFAULT_MODULE_ARGS,
forks=C.DEFAULT_FORKS, timeout=C.DEFAULT_TIMEOUT, pattern=C.DEFAULT_PATTERN, forks=C.DEFAULT_FORKS, timeout=C.DEFAULT_TIMEOUT,
remote_user=C.DEFAULT_REMOTE_USER, remote_pass=C.DEFAULT_REMOTE_PASS, pattern=C.DEFAULT_PATTERN, remote_user=C.DEFAULT_REMOTE_USER,
sudo_pass=C.DEFAULT_SUDO_PASS, remote_port=C.DEFAULT_REMOTE_PORT, background=0, remote_pass=C.DEFAULT_REMOTE_PASS, remote_port=C.DEFAULT_REMOTE_PORT,
basedir=None, setup_cache=None, transport=C.DEFAULT_TRANSPORT, sudo_pass=C.DEFAULT_SUDO_PASS, background=0, basedir=None,
conditional='True', groups={}, callbacks=None, verbose=False, setup_cache=None, transport=C.DEFAULT_TRANSPORT, conditional='True',
debug=False, sudo=False, extra_vars=None, module_vars=None, is_playbook=False): callbacks=None, debug=False, sudo=False, module_vars=None,
is_playbook=False, inventory=None):
"""
host_list : path to a host list file, like /etc/ansible/hosts
module_path : path to modules, like /usr/share/ansible
module_name : which module to run (string)
module_args : args to pass to the module (string)
forks : desired level of paralellism (hosts to run on at a time)
timeout : connection timeout, such as a SSH timeout, in seconds
pattern : pattern or groups to select from in inventory
remote_user : connect as this remote username
remote_pass : supply this password (if not using keys)
remote_port : use this default remote port (if not set by the inventory system)
sudo_pass : sudo password if using sudo and sudo requires a password
background : run asynchronously with a cap of this many # of seconds (if not 0)
basedir : paths used by modules if not absolute are relative to here
setup_cache : this is a internalism that is going away
transport : transport mode (paramiko, local)
conditional : only execute if this string, evaluated, is True
callbacks : output callback class
sudo : log in as remote user and immediately sudo to root
module_vars : provides additional variables to a template. FIXME: just use module_args, remove
is_playbook : indicates Runner is being used by a playbook. affects behavior in various ways.
inventory : inventory object, if host_list is not provided
"""
if setup_cache is None: if setup_cache is None:
setup_cache = {} setup_cache = {}
if basedir is None: if basedir is None:
@ -93,11 +116,10 @@ class Runner(object):
self.transport = transport self.transport = transport
self.connector = ansible.connection.Connection(self, self.transport) self.connector = ansible.connection.Connection(self, self.transport)
if type(host_list) == str: if inventory is None:
self.host_list, self.groups = self.parse_hosts(host_list) self.inventory = ansible.inventory.Inventory(host_list)
else: else:
self.host_list = host_list self.inventory = inventory
self.groups = groups
self.setup_cache = setup_cache self.setup_cache = setup_cache
self.conditional = conditional self.conditional = conditional
@ -107,10 +129,8 @@ class Runner(object):
self.pattern = pattern self.pattern = pattern
self.module_args = module_args self.module_args = module_args
self.module_vars = module_vars self.module_vars = module_vars
self.extra_vars = extra_vars
self.timeout = timeout self.timeout = timeout
self.debug = debug self.debug = debug
self.verbose = verbose
self.remote_user = remote_user self.remote_user = remote_user
self.remote_pass = remote_pass self.remote_pass = remote_pass
self.remote_port = remote_port self.remote_port = remote_port
@ -129,116 +149,18 @@ class Runner(object):
self._tmp_paths = {} self._tmp_paths = {}
random.seed() random.seed()
# ***************************************************** # *****************************************************
@classmethod @classmethod
def parse_hosts_from_regular_file(cls, host_list): def parse_hosts(cls, host_list, override_hosts=None):
''' parse a textual host file '''
results = []
groups = dict(ungrouped=[])
lines = file(host_list).read().split("\n")
group_name = 'ungrouped'
for item in lines:
item = item.lstrip().rstrip()
if item.startswith("#"):
# ignore commented out lines
pass
elif item.startswith("["):
# looks like a group
group_name = item.replace("[","").replace("]","").lstrip().rstrip()
groups[group_name] = []
elif item != "":
# looks like a regular host
groups[group_name].append(item)
if not item in results:
results.append(item)
return (results, groups)
# *****************************************************
@classmethod
def parse_hosts_from_script(cls, host_list, extra_vars):
''' evaluate a script that returns list of hosts by groups '''
results = []
groups = dict(ungrouped=[])
host_list = os.path.abspath(host_list)
cls._external_variable_script = host_list
cmd = [host_list, '--list']
if extra_vars:
cmd.extend(['--extra-vars', extra_vars])
cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
out, err = cmd.communicate()
rc = cmd.returncode
if rc:
raise errors.AnsibleError("%s: %s" % (host_list, err))
try:
groups = utils.json_loads(out)
except:
raise errors.AnsibleError("invalid JSON response from script: %s" % host_list)
for (groupname, hostlist) in groups.iteritems():
for host in hostlist:
if host not in results:
results.append(host)
return (results, groups)
# *****************************************************
@classmethod
def parse_hosts(cls, host_list, override_hosts=None, extra_vars=None):
''' parse the host inventory file, returns (hosts, groups) ''' ''' parse the host inventory file, returns (hosts, groups) '''
if override_hosts is not None: if override_hosts is None:
if type(override_hosts) != list: inventory = ansible.inventory.Inventory(host_list)
raise errors.AnsibleError("override hosts must be a list")
return (override_hosts, dict(ungrouped=override_hosts))
if type(host_list) == list:
raise Exception("function can only be called on inventory files")
host_list = os.path.expanduser(host_list)
if not os.path.exists(host_list):
raise errors.AnsibleFileNotFound("inventory file not found: %s" % host_list)
if not os.access(host_list, os.X_OK):
return Runner.parse_hosts_from_regular_file(host_list)
else: else:
return Runner.parse_hosts_from_script(host_list, extra_vars) inventory = ansible.inventory.Inventory(override_hosts)
# ***************************************************** return inventory.host_list, inventory.groups
def _matches(self, host_name, pattern):
''' returns if a hostname is matched by the pattern '''
# a pattern is in fnmatch format but more than one pattern
# can be strung together with semicolons. ex:
# atlanta-web*.example.com;dc-web*.example.com
if host_name == '':
return False
pattern = pattern.replace(";",":")
subpatterns = pattern.split(":")
for subpattern in subpatterns:
if subpattern == 'all':
return True
if fnmatch.fnmatch(host_name, subpattern):
return True
elif subpattern in self.groups:
if host_name in self.groups[subpattern]:
return True
return False
# *****************************************************
def _connect(self, host):
''' connects to a host, returns (is_successful, connection_object OR traceback_string) '''
try:
return [ True, self.connector.connect(host) ]
except errors.AnsibleConnectionFailed, e:
return [ False, "FAILED: %s" % str(e) ]
# ***************************************************** # *****************************************************
@ -263,7 +185,7 @@ class Runner(object):
if type(files) == str: if type(files) == str:
files = [ files ] files = [ files ]
for filename in files: for filename in files:
if not filename.startswith('/tmp/'): if filename.find('/tmp/') == -1:
raise Exception("not going to happen") raise Exception("not going to happen")
self._exec_command(conn, "rm -rf %s" % filename, None) self._exec_command(conn, "rm -rf %s" % filename, None)
@ -278,51 +200,22 @@ class Runner(object):
# ***************************************************** # *****************************************************
def _transfer_str(self, conn, tmp, name, args_str): def _transfer_str(self, conn, tmp, name, data):
''' transfer arguments as a single file to be fed to the module. ''' ''' transfer string to remote file '''
if type(args_str) == dict: if type(data) == dict:
args_str = utils.smjson(args_str) data = utils.smjson(data)
args_fd, args_file = tempfile.mkstemp() afd, afile = tempfile.mkstemp()
args_fo = os.fdopen(args_fd, 'w') afo = os.fdopen(afd, 'w')
args_fo.write(args_str) afo.write(data)
args_fo.flush() afo.flush()
args_fo.close() afo.close()
args_remote = os.path.join(tmp, name) remote = os.path.join(tmp, name)
conn.put_file(args_file, args_remote) conn.put_file(afile, remote)
os.unlink(args_file) os.unlink(afile)
return remote
return args_remote
# *****************************************************
def _add_variables_from_script(self, conn, inject):
''' support per system variabes from external variable scripts, see web docs '''
host = conn.host
cmd = [Runner._external_variable_script, '--host', host]
if self.extra_vars:
cmd.extend(['--extra-vars', self.extra_vars])
cmd = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False
)
out, err = cmd.communicate()
inject2 = {}
try:
inject2 = utils.json_loads(out)
except:
raise errors.AnsibleError("%s returned invalid result when called with hostname %s" % (
Runner._external_variable_script,
host
))
# store injected variables in the templates
inject.update(inject2)
# ***************************************************** # *****************************************************
@ -335,7 +228,7 @@ class Runner(object):
# TODO: keep this as a dict through the whole path to simplify this code # TODO: keep this as a dict through the whole path to simplify this code
for (k,v) in inject.iteritems(): for (k,v) in inject.iteritems():
if not k.startswith('facter_') and not k.startswith('ohai_'): if not k.startswith('facter_') and not k.startswith('ohai_') and not k.startswith('ansible_'):
if not is_dict: if not is_dict:
if str(v).find(" ") != -1: if str(v).find(" ") != -1:
v = "\"%s\"" % v v = "\"%s\"" % v
@ -375,19 +268,20 @@ class Runner(object):
''' runs a module that has already been transferred ''' ''' runs a module that has already been transferred '''
inject = self.setup_cache.get(conn.host,{}) inject = self.setup_cache.get(conn.host,{})
conditional = utils.double_template(self.conditional, inject) conditional = utils.double_template(self.conditional, inject, self.setup_cache)
if not eval(conditional): if not eval(conditional):
return [ utils.smjson(dict(skipped=True)), None, 'skipped' ] return [ utils.smjson(dict(skipped=True)), None, 'skipped' ]
if Runner._external_variable_script is not None: host_variables = self.inventory.get_variables(conn.host)
self._add_variables_from_script(conn, inject) inject.update(host_variables)
if self.module_name == 'setup': if self.module_name == 'setup':
args = self._add_setup_vars(inject, args) args = self._add_setup_vars(inject, args)
args = self._add_setup_metadata(args) args = self._add_setup_metadata(args)
if type(args) == dict: if type(args) == dict:
args = utils.bigjson(args) args = utils.bigjson(args)
args = utils.template(args, inject) args = utils.template(args, inject, self.setup_cache)
module_name_tail = remote_module_path.split("/")[-1] module_name_tail = remote_module_path.split("/")[-1]
@ -492,7 +386,11 @@ class Runner(object):
dest = options.get('dest', None) dest = options.get('dest', None)
if source is None or dest is None: if source is None or dest is None:
return (host, True, dict(failed=True, msg="src and dest are required"), '') return (host, True, dict(failed=True, msg="src and dest are required"), '')
# apply templating to source argument
inject = self.setup_cache.get(conn.host,{})
source = utils.template(source, inject, self.setup_cache)
# transfer the file to a remote tmp location # transfer the file to a remote tmp location
tmp_src = tmp + source.split('/')[-1] tmp_src = tmp + source.split('/')[-1]
conn.put_file(utils.path_dwim(self.basedir, source), tmp_src) conn.put_file(utils.path_dwim(self.basedir, source), tmp_src)
@ -524,8 +422,8 @@ class Runner(object):
return (host, True, dict(failed=True, msg="src and dest are required"), '') return (host, True, dict(failed=True, msg="src and dest are required"), '')
# files are saved in dest dir, with a subdir for each host, then the filename # files are saved in dest dir, with a subdir for each host, then the filename
filename = os.path.basename(source) dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), host, source)
dest = "%s/%s/%s" % (utils.path_dwim(self.basedir, dest), host, filename) dest = dest.replace("//","/")
# compare old and new md5 for support of change hooks # compare old and new md5 for support of change hooks
local_md5 = None local_md5 = None
@ -539,7 +437,6 @@ class Runner(object):
# fetch the file and check for changes # fetch the file and check for changes
conn.fetch_file(source, dest) conn.fetch_file(source, dest)
new_md5 = os.popen("md5sum %s" % dest).read().split()[0] new_md5 = os.popen("md5sum %s" % dest).read().split()[0]
changed = (new_md5 != local_md5)
if new_md5 != remote_md5: if new_md5 != remote_md5:
return (host, True, dict(failed=True, msg="md5 mismatch", md5sum=new_md5), '') return (host, True, dict(failed=True, msg="md5 mismatch", md5sum=new_md5), '')
return (host, True, dict(changed=True, md5sum=new_md5), '') return (host, True, dict(changed=True, md5sum=new_md5), '')
@ -577,32 +474,54 @@ class Runner(object):
if source is None or dest is None: if source is None or dest is None:
return (host, True, dict(failed=True, msg="src and dest are required"), '') return (host, True, dict(failed=True, msg="src and dest are required"), '')
if metadata is None: # apply templating to source argument so vars can be used in the path
if self.remote_user == 'root': inject = self.setup_cache.get(conn.host,{})
metadata = '/etc/ansible/setup' source = utils.template(source, inject, self.setup_cache)
else:
metadata = '~/.ansible/setup'
# first copy the source template over (host, ok, data, err) = (None, None, None, None)
temppath = tmp + os.path.split(source)[-1]
conn.put_file(utils.path_dwim(self.basedir, source), temppath) if not self.is_playbook:
# not running from a playbook so we have to fetch the remote
# setup file contents before proceeding...
if metadata is None:
if self.remote_user == 'root':
metadata = '/etc/ansible/setup'
else:
# path is expanded on remote side
metadata = "~/.ansible/setup"
# install the template module
slurp_module = self._transfer_module(conn, tmp, 'slurp')
# run the slurp module to get the metadata file
args = "src=%s" % metadata
(result1, err, executed) = self._execute_module(conn, tmp, slurp_module, args)
result1 = utils.json_loads(result1)
if not 'content' in result1 or result1.get('encoding','base64') != 'base64':
result1['failed'] = True
return self._return_from_module(conn, host, result1, err, executed)
content = base64.b64decode(result1['content'])
inject = utils.json_loads(content)
# install the template module # install the template module
template_module = self._transfer_module(conn, tmp, 'template') copy_module = self._transfer_module(conn, tmp, 'copy')
# transfer module vars # template the source data locally
if self.module_vars: source_data = file(utils.path_dwim(self.basedir, source)).read()
vars = utils.bigjson(self.module_vars) resultant = ''
vars_path = self._transfer_str(conn, tmp, 'module_vars', vars) try:
vars_arg=" vars=%s"%(vars_path) resultant = utils.template(source_data, inject, self.setup_cache)
else: except Exception, e:
vars_arg="" return (host, False, dict(failed=True, msg=str(e)), '')
xfered = self._transfer_str(conn, tmp, 'source', resultant)
# run the template module
args = "src=%s dest=%s metadata=%s%s" % (temppath, dest, metadata, vars_arg) # run the COPY module
(result1, err, executed) = self._execute_module(conn, tmp, template_module, args) args = "src=%s dest=%s" % (xfered, dest)
(result1, err, executed) = self._execute_module(conn, tmp, copy_module, args)
(host, ok, data, err) = self._return_from_module(conn, host, result1, err, executed) (host, ok, data, err) = self._return_from_module(conn, host, result1, err, executed)
# modify file attribs if needed
if ok: if ok:
return self._chain_file_module(conn, tmp, data, err, options, executed) return self._chain_file_module(conn, tmp, data, err, options, executed)
else: else:
@ -628,12 +547,17 @@ class Runner(object):
def _executor_internal(self, host): def _executor_internal(self, host):
''' callback executed in parallel for each host. returns (hostname, connected_ok, extra) ''' ''' callback executed in parallel for each host. returns (hostname, connected_ok, extra) '''
ok, conn = self._connect(host) host_variables = self.inventory.get_variables(host)
if not ok: port = host_variables.get('ansible_ssh_port', self.remote_port)
return [ host, False, conn , None]
conn = None
try:
conn = self.connector.connect(host, port)
except errors.AnsibleConnectionFailed, e:
return [ host, False, "FAILED: %s" % str(e), None ]
cache = self.setup_cache.get(host, {}) cache = self.setup_cache.get(host, {})
module_name = utils.template(self.module_name, cache) module_name = utils.template(self.module_name, cache, self.setup_cache)
tmp = self._get_tmp_path(conn) tmp = self._get_tmp_path(conn)
result = None result = None
@ -692,7 +616,14 @@ class Runner(object):
def _get_tmp_path(self, conn): def _get_tmp_path(self, conn):
''' gets a temporary path on a remote box ''' ''' gets a temporary path on a remote box '''
result, err = self._exec_command(conn, "mktemp -d /tmp/ansible.XXXXXX", None, sudoable=False) basetmp = "/var/tmp"
if self.remote_user != 'root':
basetmp = "/home/%s/.ansible/tmp" % self.remote_user
cmd = "mktemp -d %s/ansible.XXXXXX" % basetmp
if self.remote_user != 'root':
cmd = "mkdir -p %s && %s" % (basetmp, cmd)
result, err = self._exec_command(conn, cmd, None, sudoable=False)
cleaned = result.split("\n")[0].strip() + '/' cleaned = result.split("\n")[0].strip() + '/'
return cleaned return cleaned
@ -714,13 +645,6 @@ class Runner(object):
# ***************************************************** # *****************************************************
def _match_hosts(self, pattern):
''' return all matched hosts fitting a pattern '''
return [ h for h in self.host_list if self._matches(h, pattern) ]
# *****************************************************
def _parallel_exec(self, hosts): def _parallel_exec(self, hosts):
''' handles mulitprocessing when more than 1 fork is required ''' ''' handles mulitprocessing when more than 1 fork is required '''
@ -767,7 +691,7 @@ class Runner(object):
results2["dark"][host] = result results2["dark"][host] = result
# hosts which were contacted but never got a chance to return # hosts which were contacted but never got a chance to return
for host in self._match_hosts(self.pattern): for host in self.inventory.list_hosts(self.pattern):
if not (host in results2['dark'] or host in results2['contacted']): if not (host in results2['dark'] or host in results2['contacted']):
results2["dark"][host] = {} results2["dark"][host] = {}
@ -779,7 +703,7 @@ class Runner(object):
''' xfer & run module on all matched hosts ''' ''' xfer & run module on all matched hosts '''
# find hosts that match the pattern # find hosts that match the pattern
hosts = self._match_hosts(self.pattern) hosts = self.inventory.list_hosts(self.pattern)
if len(hosts) == 0: if len(hosts) == 0:
self.callbacks.on_no_hosts() self.callbacks.on_no_hosts()
return dict(contacted={}, dark={}) return dict(contacted={}, dark={})

View file

@ -33,7 +33,6 @@ except ImportError:
from ansible import errors from ansible import errors
import ansible.constants as C import ansible.constants as C
############################################################### ###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS # UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
############################################################### ###############################################################
@ -239,14 +238,16 @@ def varReplace(raw, vars):
return ''.join(done) return ''.join(done)
def template(text, vars): def template(text, vars, setup_cache):
''' run a text buffer through the templating engine ''' ''' run a text buffer through the templating engine '''
vars = vars.copy()
text = varReplace(str(text), vars) text = varReplace(str(text), vars)
vars['hostvars'] = setup_cache
template = jinja2.Template(text) template = jinja2.Template(text)
return template.render(vars) return template.render(vars)
def double_template(text, vars): def double_template(text, vars, setup_cache):
return template(template(text, vars), vars) return template(template(text, vars, setup_cache), vars, setup_cache)
def template_from_file(path, vars): def template_from_file(path, vars):
''' run a file through the templating engine ''' ''' run a file through the templating engine '''
@ -279,7 +280,7 @@ class SortedOptParser(optparse.OptionParser):
self.option_list.sort(key=methodcaller('get_opt_string')) self.option_list.sort(key=methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None) return optparse.OptionParser.format_help(self, formatter=None)
def base_parser(constants=C, usage="", output_opts=False, port_opts=False, runas_opts=False, async_opts=False, connect_opts=False): def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, async_opts=False, connect_opts=False):
''' create an options parser for any ansible script ''' ''' create an options parser for any ansible script '''
parser = SortedOptParser(usage) parser = SortedOptParser(usage)
@ -301,11 +302,6 @@ def base_parser(constants=C, usage="", output_opts=False, port_opts=False, runas
dest='timeout', dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT) help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if port_opts:
parser.add_option('-p', '--port', default=constants.DEFAULT_REMOTE_PORT, type='int',
dest='remote_port',
help="override the remote ssh port (default=%s)" % constants.DEFAULT_REMOTE_PORT)
if output_opts: if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true', parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output') help='condense output')

View file

@ -42,7 +42,7 @@ def fail_json(**kwargs):
exit_json(rc=1, **kwargs) exit_json(rc=1, **kwargs)
try: try:
import apt import apt, apt_pkg
except ImportError: except ImportError:
fail_json(msg="could not import apt, please install the python-apt package on this host") fail_json(msg="could not import apt, please install the python-apt package on this host")
@ -63,17 +63,30 @@ def run_apt(command):
rc = cmd.returncode rc = cmd.returncode
return rc, out, err return rc, out, err
def package_status(pkgspec, cache): def package_split(pkgspec):
try: parts = pkgspec.split('=')
pkg = cache[pkgspec] if len(parts) > 1:
except: return parts[0], parts[1]
fail_json(msg="No package matching '%s' is available" % pkgspec) else:
return (pkg.is_installed, pkg.is_upgradable) return parts[0], None
def install(pkgspec, cache, upgrade=False): def package_status(pkgname, version, cache):
(installed, upgradable) = package_status(pkgspec, cache) try:
if (not installed) or (upgrade and upgradable): pkg = cache[pkgname]
except KeyError:
fail_json(msg="No package matching '%s' is available" % pkgname)
if version:
return pkg.is_installed and pkg.installed.version == version, False
else:
return pkg.is_installed, pkg.is_upgradable
def install(pkgspec, cache, upgrade=False, default_release=None):
name, version = package_split(pkgspec)
installed, upgradable = package_status(name, version, cache)
if not installed or (upgrade and upgradable):
cmd = "%s -q -y install '%s'" % (APT, pkgspec) cmd = "%s -q -y install '%s'" % (APT, pkgspec)
if default_release:
cmd += " -t '%s'" % (default_release,)
rc, out, err = run_apt(cmd) rc, out, err = run_apt(cmd)
if rc: if rc:
fail_json(msg="'apt-get install %s' failed: %s" % (pkgspec, err)) fail_json(msg="'apt-get install %s' failed: %s" % (pkgspec, err))
@ -82,15 +95,16 @@ def install(pkgspec, cache, upgrade=False):
return False return False
def remove(pkgspec, cache, purge=False): def remove(pkgspec, cache, purge=False):
(installed, upgradable) = package_status(pkgspec, cache) name, version = package_split(pkgspec)
installed, upgradable = package_status(name, version, cache)
if not installed: if not installed:
return False return False
else: else:
purge = '--purge' if purge else '' purge = '--purge' if purge else ''
cmd = "%s -q -y %s remove '%s'" % (APT, purge, pkgspec) cmd = "%s -q -y %s remove '%s'" % (APT, purge, name)
rc, out, err = run_apt(cmd) rc, out, err = run_apt(cmd)
if rc: if rc:
fail_json(msg="'apt-get remove %s' failed: %s" % (pkgspec, err)) fail_json(msg="'apt-get remove %s' failed: %s" % (name, err))
return True return True
@ -109,13 +123,14 @@ if not len(items):
params = {} params = {}
for x in items: for x in items:
(k, v) = x.split("=") (k, v) = x.split("=", 1)
params[k] = v params[k] = v
state = params.get('state','installed') state = params.get('state', 'installed')
package = params.get('pkg', params.get('package', params.get('name', None))) package = params.get('pkg', params.get('package', params.get('name', None)))
update_cache = params.get('update-cache', 'no') update_cache = params.get('update-cache', 'no')
purge = params.get('purge', 'no') purge = params.get('purge', 'no')
default_release = params.get('default-release', None)
if state not in ['installed', 'latest', 'removed']: if state not in ['installed', 'latest', 'removed']:
fail_json(msg='invalid state') fail_json(msg='invalid state')
@ -130,6 +145,10 @@ if package is None and update_cache != 'yes':
fail_json(msg='pkg=name and/or update-cache=yes is required') fail_json(msg='pkg=name and/or update-cache=yes is required')
cache = apt.Cache() cache = apt.Cache()
if default_release:
apt_pkg.config['APT::Default-Release'] = default_release
# reopen cache w/ modified config
cache.open()
if update_cache == 'yes': if update_cache == 'yes':
cache.update() cache.update()
@ -137,10 +156,16 @@ if update_cache == 'yes':
if package == None: if package == None:
exit_json(changed=False) exit_json(changed=False)
if package.count('=') > 1:
fail_json(msg='invalid package spec')
if state == 'latest': if state == 'latest':
changed = install(package, cache, upgrade=True) if '=' in package:
fail_json(msg='version number inconsistent with state=latest')
changed = install(package, cache, upgrade=True,
default_release=default_release)
elif state == 'installed': elif state == 'installed':
changed = install(package, cache) changed = install(package, cache, default_release=default_release)
elif state == 'removed': elif state == 'removed':
changed = remove(package, cache, purge == 'yes') changed = remove(package, cache, purge == 'yes')

View file

@ -42,7 +42,10 @@ for x in items:
src = params['src'] src = params['src']
dest = params['dest'] dest = params['dest']
if src:
src = os.path.expanduser(src)
if dest:
dest = os.path.expanduser(dest)
# raise an error if there is no src file # raise an error if there is no src file
if not os.path.exists(src): if not os.path.exists(src):

View file

@ -72,6 +72,21 @@ def add_path_info(kwargs):
kwargs['state'] = 'absent' kwargs['state'] = 'absent'
return kwargs return kwargs
# If selinux fails to find a default, return an array of None
def selinux_default_context(path, mode=0):
context = [None, None, None, None]
if not HAVE_SELINUX:
return context
try:
ret = selinux.matchpathcon(path, mode)
except OSError:
return context
if ret[0] == -1:
return context
context = ret[1].split(':')
debug("got default secontext=%s" % ret[1])
return context
# =========================================== # ===========================================
argfile = sys.argv[1] argfile = sys.argv[1]
@ -89,7 +104,11 @@ for x in items:
state = params.get('state','file') state = params.get('state','file')
path = params.get('path', params.get('dest', params.get('name', None))) path = params.get('path', params.get('dest', params.get('name', None)))
if path:
path = os.path.expanduser(path)
src = params.get('src', None) src = params.get('src', None)
if src:
src = os.path.expanduser(src)
dest = params.get('dest', None) dest = params.get('dest', None)
mode = params.get('mode', None) mode = params.get('mode', None)
owner = params.get('owner', None) owner = params.get('owner', None)
@ -102,8 +121,16 @@ recurse = params.get('recurse', 'false')
seuser = params.get('seuser', None) seuser = params.get('seuser', None)
serole = params.get('serole', None) serole = params.get('serole', None)
setype = params.get('setype', None) setype = params.get('setype', None)
serange = params.get('serange', 's0') selevel = params.get('serange', 's0')
secontext = [seuser, serole, setype, serange] context = params.get('context', None)
secontext = [seuser, serole, setype, selevel]
if context is not None:
if context != 'default':
fail_json(msg='invalid context: %s' % context)
if seuser is not None or serole is not None or setype is not None:
fail_json(msg='cannot define context=default and seuser, serole or setype')
secontext = selinux_default_context(path)
if state not in [ 'file', 'directory', 'link', 'absent']: if state not in [ 'file', 'directory', 'link', 'absent']:
fail_json(msg='invalid state: %s' % state) fail_json(msg='invalid state: %s' % state)
@ -144,34 +171,14 @@ def selinux_context(path):
debug("got current secontext=%s" % ret[1]) debug("got current secontext=%s" % ret[1])
return context return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(path, mode=0):
context = [None, None, None, None]
print >>sys.stderr, path
if not HAVE_SELINUX:
return context
try:
ret = selinux.matchpathcon(path, mode)
except OSError:
return context
if ret[0] == -1:
return context
context = ret[1].split(':')
debug("got default secontext=%s" % ret[1])
return context
def set_context_if_different(path, context, changed): def set_context_if_different(path, context, changed):
if not HAVE_SELINUX: if not HAVE_SELINUX:
return changed return changed
cur_context = selinux_context(path) cur_context = selinux_context(path)
new_context = selinux_default_context(path) new_context = list(cur_context)
for i in range(len(context)): for i in range(len(context)):
if context[i] is not None and context[i] != cur_context[i]: if context[i] is not None and context[i] != cur_context[i]:
debug('new context was %s' % new_context[i])
new_context[i] = context[i] new_context[i] = context[i]
debug('new context is %s' % new_context[i])
elif new_context[i] is None:
new_context[i] = cur_context[i]
debug("current secontext is %s" % ':'.join(cur_context)) debug("current secontext is %s" % ':'.join(cur_context))
debug("new secontext is %s" % ':'.join(new_context)) debug("new secontext is %s" % ':'.join(new_context))
if cur_context != new_context: if cur_context != new_context:

View file

@ -19,9 +19,16 @@
DEFAULT_ANSIBLE_SETUP = "/etc/ansible/setup" DEFAULT_ANSIBLE_SETUP = "/etc/ansible/setup"
import array
import fcntl
import glob
import sys import sys
import os import os
import platform
import re
import shlex import shlex
import socket
import struct
import subprocess import subprocess
import traceback import traceback
@ -30,6 +37,244 @@ try:
except ImportError: except ImportError:
import simplejson as json import simplejson as json
_I386RE = re.compile(r'i[3456]86')
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
# DMI bits
DMI_DICT = { 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor' }
# From smolt and DMI spec
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA" ]
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_DICT = { '/etc/redhat-release': 'RedHat',
'/etc/vmware-release': 'VMwareESX' }
def get_file_content(path):
if os.path.exists(path) and os.access(path, os.R_OK):
data = open(path).read().strip()
if len(data) == 0:
data = None
else:
data = None
return data
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(facts):
dist = platform.dist()
facts['distribution'] = dist[0].capitalize() or 'NA'
facts['distribution_version'] = dist[1] or 'NA'
facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in OSDIST_DICT.items():
if os.path.exists(path):
if facts['distribution'] == 'Fedora':
pass
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
facts['distribution'] = name
else:
facts['distribution'] = data.split()[0]
else:
facts['distribution'] = name
# Platform
# patform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(facts):
facts['system'] = platform.system()
facts['kernel'] = platform.release()
facts['machine'] = platform.machine()
facts['python_version'] = platform.python_version()
if facts['machine'] == 'x86_64':
facts['architecture'] = facts['machine']
elif _I386RE.search(facts['machine']):
facts['architecture'] = 'i386'
else:
facts['archtecture'] = facts['machine']
if facts['system'] == 'Linux':
get_distribution_facts(facts)
def get_memory_facts(facts):
if not os.access("/proc/meminfo", os.R_OK):
return facts
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
facts["%s_mb" % key.lower()] = long(val) / 1024
def get_cpu_facts(facts):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return facts
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'model name':
if 'processor' not in facts:
facts['processor'] = []
facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
facts['processor_count'] = len(sockets)
facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
facts['processor_count'] = i
facts['processor_cores'] = 'NA'
def get_hardware_facts(facts):
get_memory_facts(facts)
get_cpu_facts(facts)
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
facts['form_factor'] = FORM_FACTOR[int(data)]
else:
facts[key] = data
else:
facts[key] = 'NA'
def get_linux_virtual_facts(facts):
if os.path.exists("/proc/xen"):
facts['virtualization_type'] = 'xen'
facts['virtualization_role'] = 'guest'
if os.path.exists("/proc/xen/capabilities"):
facts['virtualization_role'] = 'host'
if os.path.exists("/proc/modules"):
modules = []
for line in open("/proc/modules").readlines():
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
facts['virtualization_type'] = 'kvm'
facts['virtualization_role'] = 'host'
elif 'vboxdrv' in modules:
facts['virtualization_type'] = 'virtualbox'
facts['virtualization_role'] = 'host'
elif 'vboxguest' in modules:
facts['virtualization_type'] = 'virtualbox'
facts['virtualization_role'] = 'guest'
if 'QEMU' in facts['processor'][0]:
facts['virtualization_type'] = 'kvm'
facts['virtualization_role'] = 'guest'
if facts['distribution'] == 'VMwareESX':
facts['virtualization_type'] = 'VMware'
facts['virtualization_role'] = 'host'
# You can spawn a dmidecode process and parse that or infer from devices
for dev_model in glob.glob('/proc/ide/hd*/model'):
info = open(dev_model).read()
if 'VMware' in info:
facts['virtualization_type'] = 'VMware'
facts['virtualization_role'] = 'guest'
elif 'Virtual HD' in info or 'Virtual CD' in info:
facts['virtualization_type'] = 'VirtualPC'
facts['virtualization_role'] = 'guest'
def get_virtual_facts(facts):
facts['virtualization_type'] = 'None'
facts['virtualization_role'] = 'None'
if facts['system'] == 'Linux':
facts = get_linux_virtual_facts(facts)
# get list of interfaces that are up
def get_interfaces():
length = 4096
offset = 32
step = 32
if platform.architecture()[0] == '64bit':
offset = 16
step = 40
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array.array('B', '\0' * length)
bytelen = struct.unpack('iL', fcntl.ioctl(
s.fileno(), SIOCGIFCONF, struct.pack(
'iL', length, names.buffer_info()[0])
))[0]
return [names.tostring()[i:i+offset].split('\0', 1)[0]
for i in range(0, bytelen, step)]
def get_iface_hwaddr(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), SIOCGIFHWADDR,
struct.pack('256s', iface[:15]))
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def get_network_facts(facts):
facts['fqdn'] = socket.gethostname()
facts['hostname'] = facts['fqdn'].split('.')[0]
facts['interfaces'] = get_interfaces()
for iface in facts['interfaces']:
facts[iface] = { 'macaddress': get_iface_hwaddr(iface) }
# This is lame, but there doesn't appear to be a good way
# to get all addresses for both IPv4 and IPv6.
cmd = subprocess.Popen("/sbin/ifconfig %s" % iface, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
for line in out.split('\n'):
data = line.split()
if 'inet addr' in line:
if 'ipv4' not in facts[iface]:
facts[iface]['ipv4'] = {}
facts[iface]['ipv4'] = { 'address': data[1].split(':')[1],
'netmask': data[-1].split(':')[1] }
if 'inet6 addr' in line:
(ip, prefix) = data[2].split('/')
scope = data[3].split(':')[1].lower()
if 'ipv6' not in facts[iface]:
facts[iface]['ipv6'] = []
facts[iface]['ipv6'].append( { 'address': ip,
'prefix': prefix,
'scope': scope } )
return facts
def get_public_ssh_host_keys(facts):
dsa = get_file_content('/etc/ssh/ssh_host_dsa_key.pub')
rsa = get_file_content('/etc/ssh/ssh_host_rsa_key.pub')
if dsa is None:
dsa = 'NA'
else:
facts['ssh_host_key_dsa_public'] = dsa.split()[1]
if rsa is None:
rsa = 'NA'
else:
facts['ssh_host_key_rsa_public'] = rsa.split()[1]
def get_service_facts(facts):
get_public_ssh_host_keys(facts)
def ansible_facts():
facts = {}
get_platform_facts(facts)
get_hardware_facts(facts)
get_virtual_facts(facts)
get_network_facts(facts)
get_service_facts(facts)
return facts
# load config & template variables # load config & template variables
if len(sys.argv) == 1: if len(sys.argv) == 1:
@ -65,6 +310,10 @@ if not os.path.exists(ansible_file):
else: else:
md5sum = os.popen("md5sum %s" % ansible_file).read().split()[0] md5sum = os.popen("md5sum %s" % ansible_file).read().split()[0]
# Get some basic facts in case facter or ohai are not installed
for (k, v) in ansible_facts().items():
setup_options["ansible_%s" % k] = v
# if facter is installed, and we can use --json because # if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON # ruby-json is ALSO installed, include facter data in the JSON

71
library/slurp Executable file
View file

@ -0,0 +1,71 @@
#!/usr/bin/python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import shlex
import base64
try:
import json
except ImportError:
import simplejson as json
# ===========================================
# convert arguments of form a=b c=d
# to a dictionary
if len(sys.argv) == 1:
sys.exit(1)
argfile = sys.argv[1]
if not os.path.exists(argfile):
sys.exit(1)
items = shlex.split(open(argfile, 'r').read())
params = {}
for x in items:
(k, v) = x.split("=")
params[k] = v
source = os.path.expanduser(params['src'])
# ==========================================
# raise an error if there is no template metadata
if not os.path.exists(source):
print json.dumps(dict(
failed = 1,
msg = "file not found: %s" % source
))
sys.exit(1)
if not os.access(source, os.R_OK):
print json.dumps(dict(
failed = 1,
msg = "file is not readable: %s" % source
))
sys.exit(1)
# ==========================================
data = file(source).read()
data = base64.b64encode(data)
print json.dumps(dict(content=data, encoding='base64'))
sys.exit(0)

View file

@ -17,119 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys # hey the Ansible template module isn't really a remote transferred
import os # module. All the magic happens in Runner.py making use of the
import jinja2 # copy module, and if not running from a playbook, also the 'slurp'
import shlex # module.
try:
import json
except ImportError:
import simplejson as json
environment = jinja2.Environment()
# ===========================================
# convert arguments of form a=b c=d
# to a dictionary
# FIXME: make more idiomatic
if len(sys.argv) == 1:
sys.exit(1)
argfile = sys.argv[1]
if not os.path.exists(argfile):
sys.exit(1)
items = shlex.split(open(argfile, 'r').read())
params = {}
for x in items:
(k, v) = x.split("=")
params[k] = v
source = params['src']
dest = params['dest']
metadata = params.get('metadata', '/etc/ansible/setup')
module_vars = params.get('vars')
# raise an error if there is no template metadata
if not os.path.exists(metadata):
print json.dumps({
"failed" : 1,
"msg" : "Missing %s, did you run the setup module yet?" % metadata
})
sys.exit(1)
# raise an error if we can't parse the template metadata
#data = {}
try:
f = open(metadata)
data = json.loads(f.read())
f.close()
except:
print json.dumps({
"failed" : 1,
"msg" : "Failed to parse/load %s, rerun the setup module?" % metadata
})
sys.exit(1)
if module_vars:
try:
f = open(module_vars)
vars = json.loads(f.read())
data.update(vars)
f.close()
except:
print json.dumps({
"failed" : 1,
"msg" : "Failed to parse/load %s." % module_vars
})
sys.exit(1)
if not os.path.exists(source):
print json.dumps({
"failed" : 1,
"msg" : "Source template could not be read: %s" % source
})
sys.exit(1)
source = file(source).read()
if os.path.isdir(dest):
print json.dumps({
"failed" : 1,
"msg" : "Destination is a directory"
})
sys.exit(1)
# record md5sum of original source file so we can report if it changed
changed = False
md5sum = None
if os.path.exists(dest):
md5sum = os.popen("md5sum %s" % dest).read().split()[0]
try:
# call Jinja2 here and save the new template file
template = environment.from_string(source)
data_out = template.render(data)
except jinja2.TemplateError, e:
print json.dumps({
"failed": True,
"msg" : e.message
})
sys.exit(1)
f = open(dest, "w+")
f.write(data_out)
f.close()
# record m5sum and return success and whether things have changed
md5sum2 = os.popen("md5sum %s" % dest).read().split()[0]
if md5sum != md5sum2:
changed = True
# mission accomplished
print json.dumps({
"md5sum" : md5sum2,
"changed" : changed
})

View file

@ -10,8 +10,7 @@ This software may be freely redistributed under the terms of the GNU
general public license. general public license.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software along with this program. If not, see <http://www.gnu.org/licenses/>.
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
""" """
VIRT_FAILED = 1 VIRT_FAILED = 1

44
packaging/arch/PKGBUILD Normal file
View file

@ -0,0 +1,44 @@
#Maintainer: Michel Blanc <mblanc@erasme.org>
pkgname=ansible-git
pkgver=20120419
pkgrel=1
pkgdesc="A radically simple deployment, model-driven configuration management, and command execution framework"
arch=('any')
url="https://github.com/ansible/ansible"
license=('GPL3')
depends=('python2' 'python2-yaml' 'python-paramiko>=1.7.7' 'python2-jinja' 'python-simplejson')
makedepends=('git' 'asciidoc' 'fakeroot')
_gitroot="https://github.com/ansible/ansible"
_gitname="ansible"
build() {
  # Fetch (or refresh) the upstream git checkout, then run the project's
  # top-level make, which among other things builds the asciidoc manpages.
  cd "$srcdir"
  msg "Connecting to GIT server...."

  if [ -d $_gitname ] ; then
    # a previous checkout exists under $srcdir -- just pull updates
    cd $_gitname && git pull origin
    msg "The local files are updated."
  else
    git clone $_gitroot $_gitname
  fi

  msg "GIT checkout done or server timeout"

  cd "$srcdir/$_gitname"
  make
}
package() {
  # Install the Python package, the Ansible module library, and gzipped
  # manpages into the packaging root ($pkgdir).
  cd "$srcdir/$_gitname"
  mkdir -p ${pkgdir}/usr/share/ansible
  cp ./library/* ${pkgdir}/usr/share/ansible/
  python setup.py install -O1 --root=${pkgdir}
  # install -D creates the man1 directory as needed
  install -D docs/man/man1/ansible.1 ${pkgdir}/usr/share/man/man1/ansible.1
  install -D docs/man/man1/ansible-playbook.1 ${pkgdir}/usr/share/man/man1/ansible-playbook.1
  gzip -9 ${pkgdir}/usr/share/man/man1/ansible.1
  gzip -9 ${pkgdir}/usr/share/man/man1/ansible-playbook.1
}

View file

@ -0,0 +1,11 @@
I have added a debian folder for use in building a .deb file for ansible. From the ansible directory you can run the following command to construct a debian package of ansible.
~/ansible$ dpkg-buildpackage -us -uc -rfakeroot
The debian package files will be placed in the ../ directory and can be installed with the following command:
~/$ sudo dpkg -i ansible_<version>_all.deb
dpkg -i doesn't resolve dependencies, so if the previous command fails because of missing dependencies, run the following to install them and then re-run the dpkg -i command to install the package:
$ sudo apt-get -f install
--Henry Graham

View file

@ -0,0 +1,3 @@
etc/ansible
usr/lib/python2.7/site-packages
usr/share/ansible

View file

@ -0,0 +1,5 @@
examples/hosts etc/ansible
library/* usr/share/ansible
docs/man/man1/ansible.1 usr/share/man/man1
docs/man/man1/ansible-playbook.1 usr/share/man/man1
bin/* usr/bin

View file

@ -0,0 +1,5 @@
ansible (0.0.2) debian; urgency=low
* Initial Release
-- Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu> Tue, 17 Apr 2012 17:17:01 -0400

1
packaging/debian/compat Normal file
View file

@ -0,0 +1 @@
5

13
packaging/debian/control Normal file
View file

@ -0,0 +1,13 @@
Source: ansible
Section: admin
Priority: optional
Maintainer: Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu>
Build-Depends: cdbs, debhelper (>= 5.0.0)
Standards-Version: 3.9.1
Homepage: http://ansible.github.com/
Package: ansible
Architecture: all
Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko
Description: Ansible Application
Ansible is a extra-simple tool/API for doing 'parallel remote things' over SSH executing commands, running "modules", or executing larger 'playbooks' that can serve as a configuration management or deployment system.

View file

@ -0,0 +1,26 @@
This package was debianized by Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu> on
Tue, 17 Apr 2012 12:19:47 -0400.
It was downloaded from https://github.com/ansible/ansible.git
Copyright: Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu>
License:
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this package; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
USA.
On Debian systems, the complete text of the GNU General
Public License can be found in `/usr/share/common-licenses/GPL'.

1
packaging/debian/docs Normal file
View file

@ -0,0 +1 @@
README.md

View file

@ -0,0 +1 @@
2

6
packaging/debian/rules Executable file
View file

@ -0,0 +1,6 @@
#!/usr/bin/make -f
# -- makefile --
# CDBS-driven Debian package build: debhelper sequencing plus the
# python-distutils class (runs setup.py build/install).
include /usr/share/cdbs/1/rules/debhelper.mk

# use python-support for byte-compilation / module registration
DEB_PYTHON_SYSTEM = pysupport

include /usr/share/cdbs/1/class/python-distutils.mk

View file

@ -0,0 +1,3 @@
Gentoo ebuilds are available here:
https://github.com/uu/ubuilds

View file

@ -1,40 +1,45 @@
%if 0%{?rhel} <= 5
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
%endif
Name: ansible Name: ansible
Release: 1%{?dist} Release: 1%{?dist}
Summary: Minimal SSH command and control Summary: Minimal SSH command and control
Version: 0.0.2 Version: 0.0.2
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
Group: Development/Libraries Group: Development/Libraries
License: GPLv3 License: GPLv3
Prefix: %{_prefix}
Source0: https://github.com/downloads/ansible/ansible/%{name}-%{version}.tar.gz Source0: https://github.com/downloads/ansible/ansible/%{name}-%{version}.tar.gz
Url: http://ansible.github.com Url: http://ansible.github.com
BuildArch: noarch BuildArch: noarch
BuildRequires: asciidoc BuildRequires: python2-devel
BuildRequires: python-devel
Requires: PyYAML
Requires: python-paramiko Requires: python-paramiko
Requires: python-jinja2 Requires: python-jinja2
%description %description
Ansible is a extra-simple tool/API for doing 'parallel remote things' over SSH
executing commands, running "modules", or executing larger 'playbooks' that Ansible is a radically simple model-driven configuration management,
can serve as a configuration management or deployment system. multi-node deployment, and remote task execution system. Ansible works
over SSH and does not require any software or daemons to be installed
on remote nodes. Extension modules can be written in any language and
are transferred to managed machines automatically.
%prep %prep
%setup -q -n %{name}-%{version} %setup -q
%build %build
python setup.py build %{__python} setup.py build
%install %install
python setup.py install -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES %{__python} setup.py install -O1 --root=$RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT/etc/ansible/ mkdir -p $RPM_BUILD_ROOT/etc/ansible/
cp examples/hosts $RPM_BUILD_ROOT/etc/ansible/ cp examples/hosts $RPM_BUILD_ROOT/etc/ansible/
mkdir -p $RPM_BUILD_ROOT/%{_mandir}/man1/ mkdir -p $RPM_BUILD_ROOT/%{_mandir}/man1/
cp -v docs/man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1/ cp -v docs/man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1/
mkdir -p $RPM_BUILD_ROOT/%{_datadir}/ansible mkdir -p $RPM_BUILD_ROOT/%{_datadir}/ansible
cp -v library/* $RPM_BUILD_ROOT/%{_datadir}/ansible/ cp -v library/* $RPM_BUILD_ROOT/%{_datadir}/ansible/
@ -43,14 +48,13 @@ cp -v library/* $RPM_BUILD_ROOT/%{_datadir}/ansible/
rm -rf $RPM_BUILD_ROOT rm -rf $RPM_BUILD_ROOT
%files %files
%doc README.md PKG-INFO
%defattr(-,root,root) %defattr(-,root,root)
%{_mandir}/man1/*.gz %{python_sitelib}/ansible*
%{python_sitelib}/*
%{_bindir}/ansible* %{_bindir}/ansible*
%{_datadir}/ansible/* %{_datadir}/ansible
%config(noreplace) /etc/ansible/hosts %config(noreplace) %{_sysconfdir}/ansible
%config(noreplace) %{_sysconfdir}/ansible/ %doc README.md PKG-INFO
%doc %{_mandir}/man1/ansible*
%changelog %changelog

252
test/TestInventory.py Normal file
View file

@ -0,0 +1,252 @@
import os
import unittest
from ansible.inventory import Inventory
from ansible.runner import Runner
class TestInventory(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
self.test_dir = os.path.join(self.cwd, 'test')
self.inventory_file = os.path.join(self.test_dir, 'simple_hosts')
self.inventory_script = os.path.join(self.test_dir, 'inventory_api.py')
self.inventory_yaml = os.path.join(self.test_dir, 'yaml_hosts')
os.chmod(self.inventory_script, 0755)
def tearDown(self):
os.chmod(self.inventory_script, 0644)
### Simple inventory format tests
def simple_inventory(self):
return Inventory( self.inventory_file )
def script_inventory(self):
return Inventory( self.inventory_script )
def yaml_inventory(self):
return Inventory( self.inventory_yaml )
def test_simple(self):
inventory = self.simple_inventory()
hosts = inventory.list_hosts()
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_simple_all(self):
inventory = self.simple_inventory()
hosts = inventory.list_hosts('all')
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_simple_norse(self):
inventory = self.simple_inventory()
hosts = inventory.list_hosts("norse")
expected_hosts=['thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_simple_ungrouped(self):
inventory = self.simple_inventory()
hosts = inventory.list_hosts("ungrouped")
expected_hosts=['jupiter', 'saturn']
assert hosts == expected_hosts
def test_simple_combined(self):
inventory = self.simple_inventory()
hosts = inventory.list_hosts("norse:greek")
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_simple_restrict(self):
inventory = self.simple_inventory()
restricted_hosts = ['hera', 'poseidon', 'thor']
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
inventory.restrict_to(restricted_hosts)
hosts = inventory.list_hosts("norse:greek")
assert hosts == restricted_hosts
inventory.lift_restriction()
hosts = inventory.list_hosts("norse:greek")
assert hosts == expected_hosts
def test_simple_vars(self):
inventory = self.simple_inventory()
vars = inventory.get_variables('thor')
assert vars == {}
def test_simple_port(self):
inventory = self.simple_inventory()
vars = inventory.get_variables('hera')
assert vars == {'ansible_ssh_port': 3000}
### Inventory API tests
def test_script(self):
inventory = self.script_inventory()
hosts = inventory.list_hosts()
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
print "Expected: %s"%(expected_hosts)
print "Got : %s"%(hosts)
assert sorted(hosts) == sorted(expected_hosts)
def test_script_all(self):
inventory = self.script_inventory()
hosts = inventory.list_hosts('all')
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert sorted(hosts) == sorted(expected_hosts)
def test_script_norse(self):
inventory = self.script_inventory()
hosts = inventory.list_hosts("norse")
expected_hosts=['thor', 'odin', 'loki']
assert sorted(hosts) == sorted(expected_hosts)
def test_script_combined(self):
inventory = self.script_inventory()
hosts = inventory.list_hosts("norse:greek")
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert sorted(hosts) == sorted(expected_hosts)
def test_script_restrict(self):
inventory = self.script_inventory()
restricted_hosts = ['hera', 'poseidon', 'thor']
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
inventory.restrict_to(restricted_hosts)
hosts = inventory.list_hosts("norse:greek")
assert sorted(hosts) == sorted(restricted_hosts)
inventory.lift_restriction()
hosts = inventory.list_hosts("norse:greek")
assert sorted(hosts) == sorted(expected_hosts)
def test_script_vars(self):
inventory = self.script_inventory()
vars = inventory.get_variables('thor')
assert vars == {"hammer":True}
### Tests for yaml inventory file
def test_yaml(self):
inventory = self.yaml_inventory()
hosts = inventory.list_hosts()
print hosts
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_yaml_all(self):
inventory = self.yaml_inventory()
hosts = inventory.list_hosts('all')
expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_yaml_norse(self):
inventory = self.yaml_inventory()
hosts = inventory.list_hosts("norse")
expected_hosts=['thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_simple_ungrouped(self):
inventory = self.yaml_inventory()
hosts = inventory.list_hosts("ungrouped")
expected_hosts=['jupiter']
assert hosts == expected_hosts
def test_yaml_combined(self):
inventory = self.yaml_inventory()
hosts = inventory.list_hosts("norse:greek")
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
def test_yaml_restrict(self):
inventory = self.yaml_inventory()
restricted_hosts = ['hera', 'poseidon', 'thor']
expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
inventory.restrict_to(restricted_hosts)
hosts = inventory.list_hosts("norse:greek")
assert hosts == restricted_hosts
inventory.lift_restriction()
hosts = inventory.list_hosts("norse:greek")
assert hosts == expected_hosts
def test_yaml_vars(self):
inventory = self.yaml_inventory()
vars = inventory.get_variables('thor')
assert vars == {"hammer":True}
def test_yaml_change_vars(self):
inventory = self.yaml_inventory()
vars = inventory.get_variables('thor')
vars["hammer"] = False
vars = inventory.get_variables('thor')
assert vars == {"hammer":True}
def test_yaml_host_vars(self):
inventory = self.yaml_inventory()
vars = inventory.get_variables('saturn')
assert vars == {"moon":"titan", "moon2":"enceladus"}
def test_yaml_port(self):
inventory = self.yaml_inventory()
vars = inventory.get_variables('hera')
assert vars == {'ansible_ssh_port': 3000, 'ntp_server': 'olympus.example.com'}
### Test Runner class method
def test_class_method(self):
hosts, groups = Runner.parse_hosts(self.inventory_file)
expected_hosts = ['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
assert hosts == expected_hosts
expected_groups= {
'ungrouped': ['jupiter', 'saturn'],
'greek': ['zeus', 'hera', 'poseidon'],
'norse': ['thor', 'odin', 'loki']
}
assert groups == expected_groups
def test_class_override(self):
override_hosts = ['thor', 'odin']
hosts, groups = Runner.parse_hosts(self.inventory_file, override_hosts)
assert hosts == override_hosts
assert groups == { 'ungrouped': override_hosts }

View file

@ -136,12 +136,13 @@ class TestPlaybook(unittest.TestCase):
timeout = 5, timeout = 5,
remote_user = self.user, remote_user = self.user,
remote_pass = None, remote_pass = None,
verbose = False,
stats = ans_callbacks.AggregateStats(), stats = ans_callbacks.AggregateStats(),
callbacks = self.test_callbacks, callbacks = self.test_callbacks,
runner_callbacks = self.test_callbacks runner_callbacks = self.test_callbacks
) )
return self.playbook.run() result = self.playbook.run()
print utils.bigjson(dict(events=EVENTS))
return result
def test_one(self): def test_one(self):
pb = os.path.join(self.test_dir, 'playbook1.yml') pb = os.path.join(self.test_dir, 'playbook1.yml')

View file

@ -14,6 +14,15 @@ try:
except: except:
import simplejson as json import simplejson as json
from nose.plugins.skip import SkipTest
def get_binary(name):
    """Search each directory on $PATH for an executable file called
    *name* and return the full path of the first match, or None if no
    directory contains one."""
    candidates = (os.path.join(directory, name)
                  for directory in os.environ["PATH"].split(os.pathsep))
    for candidate in candidates:
        is_executable_file = os.path.isfile(candidate) and os.access(candidate, os.X_OK)
        if is_executable_file:
            return candidate
    return None
class TestRunner(unittest.TestCase): class TestRunner(unittest.TestCase):
def setUp(self): def setUp(self):
@ -29,7 +38,6 @@ class TestRunner(unittest.TestCase):
forks=1, forks=1,
background=0, background=0,
pattern='all', pattern='all',
verbose=True,
) )
self.cwd = os.getcwd() self.cwd = os.getcwd()
self.test_dir = os.path.join(self.cwd, 'test') self.test_dir = os.path.join(self.cwd, 'test')
@ -74,6 +82,8 @@ class TestRunner(unittest.TestCase):
assert "ping" in result assert "ping" in result
def test_facter(self): def test_facter(self):
if not get_binary("facter"):
raise SkipTest
result = self._run('facter',[]) result = self._run('facter',[])
assert "hostname" in result assert "hostname" in result
@ -168,12 +178,13 @@ class TestRunner(unittest.TestCase):
# almost every time so changed is always true, this just tests that # almost every time so changed is always true, this just tests that
# rewriting the file is ok # rewriting the file is ok
result = self._run('setup', [ "metadata=%s" % output, "a=2", "b=3", "c=4" ]) result = self._run('setup', [ "metadata=%s" % output, "a=2", "b=3", "c=4" ])
print "RAW RESULT=%s" % result
assert 'md5sum' in result assert 'md5sum' in result
def test_async(self): def test_async(self):
# test async launch and job status # test async launch and job status
# of any particular module # of any particular module
result = self._run('command', [ "/bin/sleep", "3" ], background=20) result = self._run('command', [ get_binary("sleep"), "3" ], background=20)
assert 'ansible_job_id' in result assert 'ansible_job_id' in result
assert 'started' in result assert 'started' in result
jid = result['ansible_job_id'] jid = result['ansible_job_id']
@ -191,13 +202,14 @@ class TestRunner(unittest.TestCase):
def test_fetch(self): def test_fetch(self):
input = self._get_test_file('sample.j2') input = self._get_test_file('sample.j2')
output = self._get_stage_file('127.0.0.2/sample.j2') output = os.path.join(self.stage_dir, '127.0.0.2', input)
result = self._run('fetch', [ "src=%s" % input, "dest=%s" % self.stage_dir ]) result = self._run('fetch', [ "src=%s" % input, "dest=%s" % self.stage_dir ])
print "output file=%s" % output
assert os.path.exists(output) assert os.path.exists(output)
assert open(input).read() == open(output).read() assert open(input).read() == open(output).read()
def test_yum(self): def test_yum(self):
if not get_binary("yum"):
raise SkipTest
result = self._run('yum', [ "list=repos" ]) result = self._run('yum', [ "list=repos" ])
assert 'failed' not in result assert 'failed' not in result

39
test/inventory_api.py Normal file
View file

@ -0,0 +1,39 @@
#!/usr/bin/env python
import json
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-l', '--list', default=False, dest="list_hosts", action="store_true")
parser.add_option('-H', '--host', default=None, dest="host")
parser.add_option('-e', '--extra-vars', default=None, dest="extra")
options, args = parser.parse_args()
systems = {
"ungouped": [ "jupiter", "saturn" ],
"greek": [ "zeus", "hera", "poseidon" ],
"norse": [ "thor", "odin", "loki" ]
}
variables = {
"thor": {
"hammer": True
}
}
if options.list_hosts == True:
print json.dumps(systems)
sys.exit(0)
if options.host is not None:
if options.extra:
k,v = options.extra.split("=")
variables[options.host][k] = v
print json.dumps(variables[options.host])
sys.exit(0)
parser.print_help()
sys.exit(1)

12
test/simple_hosts Normal file
View file

@ -0,0 +1,12 @@
jupiter
saturn
[greek]
zeus
hera:3000
poseidon
[norse]
thor
odin
loki

30
test/yaml_hosts Normal file
View file

@ -0,0 +1,30 @@
---
- jupiter
- host: saturn
vars:
moon: titan
moon2: enceladus
- zeus
- group: greek
hosts:
- zeus
- hera
- poseidon
vars:
- ansible_ssh_port: 3000
- ntp_server: olympus.example.com
- group: norse
hosts:
- host: thor
vars:
- hammer: True
- odin
- loki
- group: multiple
hosts:
- saturn